@phdthesis{Koch, author = {Koch, Thorsten}, title = {Rapid Mathematical Programming}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-8346}, number = {04-58}, abstract = {The thesis deals with the implementation and application of out-of-the-box tools in linear and mixed integer programming. It documents the lessons learned and conclusions drawn from five years of implementing, maintaining, extending, and using several computer codes to solve real-life industrial problems. By means of several examples it is demonstrated how to apply algebraic modeling languages to rapidly devise mathematical models of real-world problems. It is shown that today's MIP solvers are capable of solving the resulting mixed integer programs, leading to an approach that delivers results very quickly, even though problems are tackled that not long ago required the implementation of specialized branch-and-cut algorithms. In the first part of the thesis the modeling language Zimpl is introduced. Chapter 2 contains a complete description of the language. In the subsequent chapter details of the implementation are described. Both theoretical and practical considerations are discussed. Aspects of software engineering, error prevention, and detection are addressed. In the second part several real-world projects are examined that employed the methodology and the tools developed in the first part. Chapter 4 presents three projects from the telecommunication industry dealing with facility location problems. Chapter 5 characterizes questions that arise in UMTS planning. Problems, models, and solutions are discussed. Special emphasis is put on the dependence of the results on the precision of the input data. Possible reasons for unexpected and undesirable solutions are explained. Finally, the Steiner tree packing problem in graphs, a well-known hard combinatorial problem, is revisited. A formerly known, but not yet used model is applied to combine switchbox wire routing and via minimization. All instances known from the literature are solved by this approach, as are some newly generated bigger problem instances.}, language = {en} } @phdthesis{Achterberg, author = {Achterberg, Tobias}, title = {Constraint Integer Programming}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11129}, abstract = {This thesis introduces the novel paradigm of "constraint integer programming" (CIP), which integrates constraint programming (CP) and mixed integer programming (MIP) modeling and solving techniques. It is supplemented by the software SCIP, which is a solver and framework for constraint integer programming that also features SAT solving techniques. SCIP is freely available in source code for academic and non-commercial purposes. Our constraint integer programming approach is a generalization of MIP that allows for the inclusion of arbitrary constraints, as long as they turn into linear constraints on the continuous variables after all integer variables have been fixed. The constraints, be they linear or more complex, are treated by any combination of CP and MIP techniques: the propagation of the domains by constraint-specific algorithms, the generation of a linear relaxation and its solving by LP methods, and the strengthening of the LP by cutting plane separation. The current version of SCIP comes with all of the necessary components to solve mixed integer programs. 
In the thesis, we cover most of these ingredients and present extensive computational results to compare different variants for the individual building blocks of a MIP solver. We focus on the algorithms and their impact on the overall performance of the solver. In addition to mixed integer programming, the thesis deals with chip design verification, which is an important topic of electronic design automation. Chip manufacturers have to make sure that the logic design of a circuit conforms to the specification of the chip. Otherwise, the chip would show an erroneous behavior that may cause failures in the device where it is employed. An important subproblem of chip design verification is the property checking problem, which is to verify whether a circuit satisfies a specified property. We show how this problem can be modeled as constraint integer program and provide a number of problem-specific algorithms that exploit the structure of the individual constraints and the circuit as a whole. Another set of extensive computational benchmarks compares our CIP approach to the current state-of-the-art SAT methodology and documents the success of our method.}, language = {en} } @phdthesis{Roeblitz, author = {R{\"o}blitz, Susanna}, title = {Statistical Error Estimation and Grid-free Hierarchical Refinement in Conformation Dynamics}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000008079-9}, abstract = {The understanding of geometric structures and dynamical properties of molecular conformations gives insight into molecular long-term behavior. The identification of metastable conformations together with their life times and transition patterns is the intention of conformation dynamics. Conformation dynamics is a multi-scale approach that leads to a reduced description of the dynamical system in terms of a stochastic transition probability matrix. The present thesis deals with the error analysis of computed matrices and the resulting matrix functions. Since conformational membership vectors, as they are computed by the Robust Perron Cluster Analysis (PCCA+), form an invariant subspace of the transition matrix, subspace-based error estimators are of particular interest. The decomposition of the state space into basis functions and the approximation of integrals by Monte-Carlo quadrature give rise to row-wise correlated random matrices, for which stochastic norms are computed. Together with an appropriate statistical model for the distribution of matrix rows, this allows for the calculation of error bounds and error distributions of the invariant subspace and other variables of interest. Equilibration of errors among the basis functions can be achieved by enhanced sampling in regions where the trajectories are mixing slowly. Hierarchical refinement of such basis functions systematically improves the clustering into metastable conformations by reducing the error in the corresponding invariant subspace. These techniques allow for an evaluation of simulation results and pave the way for the analysis of larger molecules. Moreover, the extension of PCCA+ to non-reversible Markov chains, verified by the corresponding perturbation theory, and the modification of the objective function for the case of soft membership vectors represent a further generalization of the clustering method, thus continuing the development from PCCA over PCCA+ to PCCA++. The methods developed in this thesis are useful for but not limited to conformation dynamics. 
In fact, they are applicable to a broader class of problems which combine domain decomposition with Monte-Carlo quadrature. Possible application areas may include the chemical master equation or quantum dynamical systems.}, language = {en} } @phdthesis{LutzWestphal, author = {Lutz-Westphal, Brigitte}, title = {Kombinatorische Optimierung - Inhalte und Methoden f{\"u}r einen authentischen Mathematikunterricht}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10608}, abstract = {wird nachgereicht}, language = {en} } @phdthesis{Wagler, author = {Wagler, Annegret}, title = {Critical Edges in Perfect Graphs}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10220}, language = {en} } @phdthesis{Weber, author = {Weber, Marcus}, title = {Meshless Methods in Conformation Dynamics}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10232}, language = {en} } @phdthesis{Galliat, author = {Galliat, Tobias}, title = {Adaptive Multilevel Cluster Analysis by Self-Organizing Box Maps}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-2002001258}, abstract = {Als Cluster Analyse bezeichnet man den Prozess der Suche und Beschreibung von Gruppen (Clustern) von Objekten, so daß die Objekte innerhalb eines Clusters bez{\"u}glich eines gegebenen Maßes maximal homogen sind. Die Homogenit{\"a}t der Objekte h{\"a}ngt dabei direkt oder indirekt von den Auspr{\"a}gungen ab, die sie f{\"u}r eine Anzahl festgelegter Attribute besitzen. Die Suche nach Clustern l{\"a}ßt sich somit als Optimierungsproblem auffassen, wobei die Anzahl der Cluster vorher bekannt sein muß. Wenn die Anzahl der Objekte und der Attribute groß ist, spricht man von komplexen, hoch-dimensionalen Cluster Problemen. In diesem Fall ist eine direkte Optimierung zu aufwendig, und man ben{\"o}tigt entweder heuristische Optimierungsverfahren oder Methoden zur Reduktion der Komplexit{\"a}t. In der Vergangenheit wurden in der Forschung fast ausschließlich Verfahren f{\"u}r geometrisch basierte Clusterprobleme entwickelt. Bei diesen Problemen lassen sich die Objekte als Punkte in einem von den Attributen aufgespannten metrischen Raum modellieren; das verwendete Homogenit{\"a}tsmaß basiert auf der geometrischen Distanz der den Objekten zugeordneten Punkte. Insbesondere zur Bestimmung sogenannter metastabiler Cluster sind solche Verfahren aber offensichtlich nicht geeignet, da metastabile Cluster, die z.B. in der Konformationsanalyse von Biomolek{\"u}len von zentraler Bedeutung sind, nicht auf einer geometrischen, sondern einer dynamischen {\"A}hnlichkeit beruhen. In der vorliegenden Arbeit wird ein allgemeines Clustermodell vorgeschlagen, das zur Modellierung geometrischer, wie auch dynamischer Clusterprobleme geeignet ist. Es wird eine Methode zur Komplexit{\"a}tsreduktion von Clusterproblemen vorgestellt, die auf einer zuvor generierten Komprimierung der Objekte innerhalb des Datenraumes basiert. Dabei wird bewiesen, daß eine solche Reduktion die Clusterstruktur nicht zerst{\"o}rt, wenn die Komprimierung fein genug ist. Mittels selbstorganisierter neuronaler Netze lassen sich geeignete Komprimierungen berechnen. Um eine signifikante Komplexit{\"a}tsreduktion ohne Zerst{\"o}rung der Clusterstruktur zu erzielen, werden die genannten Methoden in ein mehrstufiges Verfahren eingebettet. Da neben der Identifizierung der Cluster auch deren effiziente Beschreibung notwendig ist, wird ferner eine spezielle Art der Komprimierung vorgestellt, der eine Boxdiskretisierung des Datenraumes zugrunde liegt. 
Diese erm{\"o}glicht die einfache Generierung von regelbasierten Clusterbeschreibungen. F{\"u}r einen speziellen Typ von Homogenit{\"a}tsfunktionen, die eine stochastische Eigenschaft besitzen, wird das mehrstufige Clusterverfahren um eine Perroncluster Analyse erweitert. Dadurch wird die Anzahl der Cluster, im Gegensatz zu herk{\"o}mmlichen Verfahren, nicht mehr als Eingabeparameter ben{\"o}tigt. Mit dem entwickelten Clusterverfahren kann erstmalig eine computergest{\"u}tzte Konformationsanalyse großer, f{\"u}r die Praxis relevanter Biomolek{\"u}le durchgef{\"u}hrt werden. Am Beispiel des HIV Protease Inhibitors VX-478 wird dies detailliert beschrieben.}, language = {en} } @phdthesis{Stolle, author = {Stolle, Hermann}, title = {Mathematische Modellierung und L{\"o}sung von Optimierungsproblemen bei der Planung von Telefonnetzen}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-1419}, language = {de} } @phdthesis{Rambau, author = {Rambau, J{\"o}rg}, title = {Polyhedral Subdivisions and Projections of Polytopes}, isbn = {3-8265-1955-8}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10271}, abstract = {The present dissertation deals with the structure of polyhedral subdivisions of point configurations. Of particular interest are the global properties of the set of all subdivisions of a given point configuration. An important open problem in this context is the following: can one always transform any triangulation of a given point configuration to any other triangulation of the same configuration by means of bistellar operations? In other words, is the set of all triangulations of a given point configuration always bistellarly connected? The results presented in this thesis contribute progress from two directions. \begin{itemize} \item The set of all subdivisions that are induced by a polytope projection is in general not bistellarly connected in a generalized sense. This result is obtained by constructing a counterexample to the so-called Generalized Baues Conjecture.'' \item The set of all triangulations of a cyclic polytope forms a bounded poset. The covering relations are given by increasing bistellar operations. Thus we get an affirmative answer to the above question in the case of cyclic polytopes. \end{itemize} In the introduction, the mathematical environment of the structures under consideration is illuminated. The "Generalized Baues Conjecture" has connections to various mathematical concepts, such as combinatorial models for loop spaces, discriminants of polynomials in several variables, etc. The triangulation posets of cyclic polytopes are natural generalizations of the well-studied Tamari lattices in order theory. Moreover, there is a connection to the higher Bruhat orders, which have similar structural properties. As a by-product, the investigations yield the shellability of all triangulations of cyclic polytopes without new vertices. This is in particular interesting because most triangulations of cyclic polytopes are non-regular.}, language = {en} } @phdthesis{Borndoerfer, author = {Bornd{\"o}rfer, Ralf}, title = {Aspects of Set Packing, Partitioning, and Covering}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10126}, abstract = {Diese Dissertation befaßt sich mit ganzzahligen Programmen mit 0/1 Systemen: SetPacking-, Partitioning- und Covering-Probleme. 
Die drei Teile der Dissertation behandeln polyedrische, algorithmische und angewandte Aspekte derartiger Modelle.}, language = {en} } @phdthesis{Eisenblaetter, author = {Eisenbl{\"a}tter, Andreas}, title = {Frequency Assignment in GSM Networks: Models, Heuristics, and Lower Bounds}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10132}, abstract = {Mobile cellular communication is a key technology in today's information age. Despite the continuing improvements in equipment design, interference is and will remain a limiting factor for the use of radio communication. This Ph. D. thesis investigates how to prevent interference to the largest possible extent when assigning the available frequencies to the base stations of a GSM cellular network. The topic is addressed from two directions: first, new algorithms are presented to compute "good" frequency assignments fast; second, a novel approach, based on semidefinite programming, is employed to provide lower bounds for the amount of unavoidable interference. The new methods proposed for automatic frequency planning are compared in terms of running times and effectiveness in computational experiments, where the planning instances are taken from practice. For most of the heuristics the running time behavior is adequate for interactive planning; at the same time, they provide reasonable assignments from a practical point of view (compared to the currently best known, but substantially slower planning methods). In fact, several of these methods are successfully applied by the German GSM network operator E-Plus. The currently best lower bounds on the amount of unavoidable (co-channel) interference are obtained from solving semidefinite programs. These programs arise as a nonpolyhedral relaxation of a minimum k-partition problem on complete graphs. The success of this approach is made plausible by revealing structural relations between the feasible set of the semidefinite program and a polytope associated with an integer linear programming formulation of the minimum k-partition problem. Comparable relations are not known to hold for any polynomial time solvable polyhedral relaxation of the minimum k-partition problem. The application described is one of the first applications of semidefinite programming to large industrial problems in combinatorial optimization.}, language = {en} } @phdthesis{Fereirra, author = {Fereirra, Carlos Eduardo}, title = {On Combinatorial Optimization Problems Arising in Computer System Design}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10146}, school = {Zuse Institute Berlin (ZIB)}, language = {en} } @phdthesis{Wessaely, author = {Wess{\"a}ly, Roland}, title = {Dimensioning Survivable Capacitated Networks}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10158}, abstract = {In der vorliegenden Dissertation untersuchen wir die Optimierung von ausfallsicheren Telekommunikationsnetzwerken. Wir pr{\"a}sentieren unterschiedliche gemischt-ganzzahlige Modelle f{\"u}r die diskrete Kapazit{\"a}tsstruktur, sowie f{\"u}r die Sicherung des Netzes gegen den Ausfall einzelner Komponenten. Die Modelle wurden in einer Kooperation mit der E-Plus Mobilfunk GmbH verwendet. Die theoretischen Resultate wurden in Algorithmen umgesetzt und in das von uns entwickelte Netzwerksoptimierungswerkzeug Discnet (Dimensioning Survivable Capacitated NETworks) integriert, welches seit mehreren Jahren in der Planung bei E-Plus eingesetzt wird. Wir betrachten das Transportnetzplanungsproblem eines Telekommunikationsanbieters. 
Dieses Problem setzt auf logischen Kommunikationsanforderungen zwischen den Standorten (Knoten) des zu planenden Netzes und potentiell installierbaren Verbindungen (Kanten) zwischen derselben Knotenmenge auf. Ein Kapazit{\"a}tsmodell stellt die Information bereit, welche Kapazit{\"a}ten auf den potentiellen Kanten verf{\"u}gbar sind. Wir betrachten zwei Modelle. Entweder ist eine explizite Liste der verf{\"u}gbaren Kapazit{\"a}ten gegeben oder eine Menge von sogenannten Basiskapazit{\"a}ten, die auf jeder Kante individuell kombiniert werden k{\"o}nnen. Die Basiskapazit{\"a}ten m{\"u}ssen paarweise ganzzahlige Vielfache voneinander sein. Man beachte, daß diese Eigenschaft von den internationalen Standards PDH und SDH erf{\"u}llt wird. Ein Ausfallsicherheitsmodell stellt die Information bereit, wie das zu planende Netz gegen den Ausfall einzelner Netzkomponenten gesch{\"u}tzt werden soll. Wir betrachten sinnvolle Kombinationen der Modelle Diversification, Reservation und Path Restoration. Das erste Modell garantiert Ausfallsicherheit durch kommunikationsbedarfsabh{\"a}ngige Beschr{\"a}nkung des Prozentsatzes, der durch einzelne Netzkomponenten geroutet werden darf. Bei den beiden anderen Modellen k{\"o}nnen Kommunikationsbedarfe bei Ausfall einer Netzkomponente auf unterschiedliche Weise neu geroutet werden. Ziel der Planung ist eine kostenminimale Kapazit{\"a}tsentscheidung, die eine Routenplanung aller Kommunikationsbedarfe gem{\"a}ß den Ausfallsicherheitsanforderungen erm{\"o}glicht. Wir entwickeln ein Schnittebenenverfahren zur L{\"o}sung der betrachteten Optimierungsprobleme. Zu diesem Zweck untersuchen wir Polyeder, die mit den verschiedenen Problemen assoziiert sind. Wir pr{\"a}sentieren neue Klassen von Ungleichungen, entwickeln Separationsalgorithmen und Heuristiken. Mit dem Schnittebenenverfahren werden untere und obere Schranken f{\"u}r den Wert von Optimall{\"o}sungen berechnet, und daher ist es m{\"o}glich, Qualit{\"a}tsgarantien f{\"u}r die berechneten L{\"o}sungen anzugeben. Parallel zur Beschreibung der implementierten Algorithmen pr{\"a}sentieren wir umfangreiche Tests mit praktisch relevanten Daten, die zu Problemen mit mehr als 2 Billionen Variablen f{\"u}hren.}, language = {en} } @phdthesis{Bley, author = {Bley, Andreas}, title = {Routing and Capacity Optimization for IP Networks}, isbn = {978-3-86727-281-0}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-15530}, abstract = {This thesis is concerned with dimensioning and routing optimization problems for communication networks that employ a shortest path routing protocol such as OSPF, IS-IS, or RIP. These protocols are widely used in the Internet. With these routing protocols, all end-to-end data streams are routed along shortest paths with respect to a metric of link lengths. The network administrator can configure the routing only by modifying this metric. In this thesis we consider the unsplittable shortest path routing variant, where each communication demand must be sent unsplit through the network. This requires that all shortest paths are uniquely determined. The major difficulties in planning such networks are that the routing can be controlled only indirectly via the routing metric and that all routing paths depend on the same routing metric. This leads to rather complicated and subtle interdependencies among the paths that comprise a valid routing. In contrast to most other routing schemes, the paths for different communication demands cannot be configured independently of each other. 
Part I of the thesis is dedicated to the relation between path sets and routing metrics and to the combinatorial properties of those path sets that comprise a valid unsplittable shortest path routing. Besides reviewing known approaches to find a compatible metric for a given path set (or to prove that none exists) and discussing some properties of valid path sets, we show that the problem of finding a compatible metric with integer lengths as small as possible and the problem of finding a smallest possible conflict in the given path set are both NP-hard to approximate within a constant factor. In Part II of the thesis we discuss the relation between unsplittable shortest path routing and several other routing schemes and we analyze the computational complexity of three basic unsplittable shortest path routing problems. We show that the lowest congestion that can be obtained with unsplittable shortest path routing may significantly exceed that achievable with other routing paradigms and we prove several non-approximability results for unsplittable shortest path routing problems that are stronger than those for the corresponding unsplittable flow problems. In addition, we derive various polynomial time approximation algorithms for general and special cases of these problems. In Part III of the thesis we finally develop an integer linear programming approach to solve these and more realistic unsplittable shortest path routing problems to optimality. We present alternative formulations for these problems, discuss their strength and computational complexity, and show how to derive strong valid inequalities. Finally, we describe our implementation of this solution approach and report on the numerical results obtained for real-world problems that came up in the planning of the German National Research and Education Networks G-WiN and X-WiN and for several benchmark instances.}, language = {en} } @phdthesis{Friese, author = {Friese, Tilmann}, title = {Eine Mehrgitter-Methode zur L{\"o}sung des Eigenwertproblems der komplexen Helmholtzgleichung}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-1999000133}, language = {en} } @phdthesis{Ascheuer, author = {Ascheuer, Norbert}, title = {Hamiltonian Path Problems in the On-line Optimization of Flexible Manufacturing Systems}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5328}, number = {TR-96-03}, abstract = {In this thesis we describe a practical problem that we encountered in the on--line optimization of a complex Flexible Manufacturing System. In the considered system a stacker crane has to fulfill all transportation tasks (jobs) in a single-aisled automatic storage system. The jobs have to be sequenced in such a way that the time needed for the unloaded moves is minimized. The modelling of this question leads to the so--called on--line Hamiltonian path problem. We computationally compare several on--line heuristics and derive lower bounds on the value obtained by an optimal on--line strategy by analyzing two off--line Combinatorial Optimization problems: the asymmetric Hamiltonian path problem with precedence constraints, also called sequential ordering problem (SOP), and the asymmetric Hamiltonian path problem with time windows (AHPPTW). We study the SOP and AHPPTW from a polyhedral point of view and derive several new classes of valid inequalities. 
Based on the polyhedral investigations we develop branch\&cut algorithms for both problems and can achieve encouraging results on solving problem instances from real--world examples of the practical application.}, language = {en} } @phdthesis{Wunderling, author = {Wunderling, Roland}, title = {Paralleler und Objektorientierter Simplex}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5386}, number = {TR-96-09}, abstract = {{\footnotesize In der vorliegenden Arbeit werden neue Implementierungen des dualen und primalen revidierten Simplex-Algorithmus f{\"u}r die L{\"o}sung linearer Programme (LPs) vorgestellt. Dazu werden die Algorithmen mithilfe einer Zeilenbasis dargestellt, aus der {\"u}ber einen Spezialfall die {\"u}bliche Darstellung mit einer Spaltenbasis folgt. Beide Darstellungen sind {\"u}ber die Dualit{\"a}t eng miteinander verbunden. Ausserdem wird eine theoretische Untersuchung der numerischen Stabilit{\"a}t von Simplex-Algorithmen durchgef{\"u}hrt, und es werden verschiedene M{\"o}glichkeiten der Stabilisierung diskutiert. Beide Darstellungen der Basis werden in den Implementierungen algorithmisch ausgenutzt, wobei der Einsatz der Zeilenbasis f{\"u}r LPs mit mehr Nebenbedingungen als Variablen Geschwindigkeitsvorteile bringt. Dar{\"u}berhinaus werden weitere Beschleunigung gegen{\"u}ber anderen state-of-the-art Implentierungen erzielt, und zwar durch den Einsatz eines Phase-1 LPs, das eine gr{\"o}sstm{\"o}gliche {\"U}bereinstimmung mit dem Ausgangs-LPs aufweist, durch eine dynamische Anpassung der Faktorisierungsfrequenz f{\"u}r die Basis-Matrix und durch die Optimierung der L{\"o}sung linearer Gleichungssysteme f{\"u}r besonders d{\"u}nnbesetzte Matrizen und Vektoren. Es wurden drei Implementierungen vorgenommen. Die erste l{\"a}uft sequentiell auf einem PC oder einer Workstation. Ihre hohe numerische Stabilit{\"a}t und Effizienz durch die Integration der oben genannten Konzepte machen sie zu einem zuverl{\"a}ssigen Hilfsmittel f{\"u}r den t{\"a}glichen Einsatz z.B.~in Schnittebenenverfahren zur L{\"o}sung ganzzahliger Programme. Als Programmiersprache wurde C++ verwendet, und es wurde ein objektorientierter Software-Entwurf zugrundegelegt. Dieser leistet eine hohe Flexibilit{\"a}t und Anpassbarkeit z.B.~f{\"u}r die Integration benutzerdefinierter Pricing-Strategien. Bei den anderen beiden Implentierungen handelt es sich um parallele Versionen f{\"u}r Parallelrechner mit gemeinsamem und f{\"u}r solche mit verteiltem Speicher. Dabei wird der objektorientierte Entwurf so genutzt, dass lediglich die zus{\"a}tzlichen Aufgaben f{\"u}r die Parallelisierung (Synchronisation, Kommunikation und Verteilung der Arbeit) implementiert werden, w{\"a}hrend alle Algorithmen von der sequentiellen Implementierung geerbt werden. Die Parallelisierung setzt an vier Punkten an. Der erste und einfachste ist die parallele Berechnung eines Matrix-Vektor-Produktes. Als zweites wurden beim Pricing und Quotiententest parallele Suchalgorithmen eingesetzt. Weiter werden beim steepest-edge Pricing zwei lineare Gleichungssysteme nebenl{\"a}ufig gel{\"o}st. Schliesslich wird ein paralleles Block-Pivoting verwendet, bei dem Gleichungssysteme mehrerer aufeinanderfolgender Iterationen gleichzeitig gel{\"o}st werden. Ob und welche der Parallelisierungs-Konzepte eine Beschleunigung bewirken, ist problemabh{\"a}ngig. Es gelingt z.B.~mit 32 Prozessoren eine Beschleunigung um mehr als einen Faktor 16 zu erzielen. 
Schliesslich wird die parallele L{\"o}sung d{\"u}nnbesetzter linearer Gleichungssysteme mit unsymmetrischen Matrizen untersucht und eine Implementierung f{\"u}r den Cray T3D vorgenommen. Sie enth{\"a}lt ein neues Verfahren des Lastausgleichs, das keinen zus{\"a}tzlichen Aufwand verursacht. Die Implementierung erzielt vergleichsweise g{\"u}nstige Laufzeiten.}}, language = {de} } @phdthesis{Hohmann, author = {Hohmann, Andreas}, title = {Inexact Gauss Newton Methods for Parameter Dependent Nonlinear Problems.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5049}, number = {TR-93-13}, abstract = {A new approach to inexact Gauss Newton methods for the solution of underdetermined nonlinear problems is presented. It is based on a convergence theorem being invariant under affine transformations of the domain and results in an easily implementable accuracy matching strategy for the arising linear subproblems which guarantees the quadratic convergence. Thanks to the weak assumptions on the given nonlinear problem, the results provide a general framework for multilevel Newton and continuation methods. As an example, a new multilevel Newton h-p collocation method for boundary value problems of ordinary differential equations is developed. It combines the inexact Newton method with a linear collocation solver using adaptive refinement and variable orders. The performance of the resulting C++ class library {\sc Cocon} is demonstrated by some numerical examples including singularly perturbed problems. In addition, the new method is applied to a realistic railway bogie model in which a branch of periodic solutions emanates from a branch of fixed points at a Hopf bifurcation.}, language = {en} } @phdthesis{Nowak, author = {Nowak, Ulrich}, title = {Adaptive Linienmethoden f{\"u}r nichtlineare parabolische Systeme in einer Raumdimension.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5059}, number = {TR-93-14}, abstract = {A new method for the numerical solution of highly nonlinear, coupled systems of parabolic differential equations in one space dimension is presented. The approach is based on a classical method of lines treatment. Time discretization is done by means of the semi--implicit Euler discretization. Space discretization is done with finite differences on non--uniform grids. Both basic discretizations are coupled with extrapolation techniques. With respect to time the extrapolation is of variable order whereas just one extrapolation step is done in space. Based on local error estimates for both the time and the space discretization error, the accuracy of the numerical approximation is controlled and the discretization stepsizes are adapted automatically and simultaneously. Besides the local adaptation of the space grids after each integration step (static regridding), the grid may even move within each integration step (dynamic regridding). Thus, the whole algorithm has a high degree of adaptivity. Due to this fact, challenging problems from applications can be solved in an efficient and robust way.}, language = {en} } @phdthesis{Schuette, author = {Sch{\"u}tte, Christof}, title = {A Quasiresonant Smoothing Algorithm for Solving Large Highly Oscillatory Differential Equations from Quantum Chemistry.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5090}, number = {TR-94-04}, abstract = {In Quantum Chemistry the field of ``Laser--Assisted Molecular Control'' has received a considerable amount of attention recently. 
One key problem in this new field is the simulation of the dynamical reaction of a molecule subjected to external radiation. This problem is described by the Schr{\"o}dinger equation, which, after eigenfunction expansion, can be written in the form of a large system of ordinary differential equations, the solutions of which show a highly oscillatory behaviour. The oscillations with high frequencies and small amplitudes confine the stepsizes of any numerical integrator -- an effect which, in turn, blows up the simulation time. Larger stepsizes can be expected by averaging these fast oscillations, thus smoothing the trajectories. Standard smoothing techniques (averaging, filtering) would kill the whole process and thus lead to wrong numerical results. To avoid this unwanted effect and nevertheless speed up computations, this paper presents a quasiresonant smoothing algorithm (QRS). In QRS, a natural splitting parameter \$\delta\$ controls the smoothing properties. An adaptive QRS--version (AQRS) is presented which includes an error estimation scheme for choosing this parameter \$\delta\$ in order to meet a given accuracy requirement. In AQRS \$\delta\$ is permanently adapted to the solution properties for computing the ``chemically necessary information'' only. The performance of AQRS is demonstrated in several test problems from the field of ``Laser--Assisted Selective Excitation of Molecules'' in which the external radiation is a picosecond laser pulse. In comparison with standard methods speedup factors of the order of \$10^2\$ are observed.}, language = {en} } @phdthesis{Zumbusch, author = {Zumbusch, Gerhard}, title = {Simultanous h-p Adaption in Multilevel Finite Elements}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-5298}, number = {TR-95-14}, language = {en} } @phdthesis{Weismantel, author = {Weismantel, Robert}, title = {Plazieren von Zellen: Theorie und L{\"o}sung eines quadratischen 0/1-Optimierungsproblems.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4883}, number = {TR-92-03}, abstract = {Die vorliegende Arbeit besch{\"a}ftigt sich mit dem Plazierungsproblem, welches beim Entwurf elektronischer Schaltungen auftritt. Das Plazierungsproblem modellieren wir als ein quadratisches 0/1 Optimierungsproblem unter linearen Nebenbedingungen und untersuchen das Modell komplexit{\"a}tstheoretisch. Der zweite Aspekt der Arbeit bezieht sich auf die L{\"o}sung praktischer Problembeispiele im sogenannten "Sea of cells"-Entwurfsstil. Zur L{\"o}sung dieser Beispiele wurde ein Prototyp implementiert und mit "state of the art"-Plazierungsverfahren verglichen. Schlie\ss lich werden wir uns mit dem Clusteringproblem, das eine Variante des Mehrfachschnitt-Problems darstellt, besch{\"a}ftigen. Dabei steht einerseits im Vordergrund, wie diese Probleme heuristisch gel{\"o}st werden k{\"o}nnen und wie die Integration des Ansatzes in das Plazierungsprogramm erfolgt. Andererseits soll das Clusteringproblem polyedrisch untersucht werden.}, language = {de} } @phdthesis{Martin, author = {Martin, Alexander}, title = {Packen von Steinerb{\"a}umen: Polyedrische Studien und Anwendung.}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-4894}, number = {TR-92-04}, abstract = {Gegeben sei ein Graph \$G=(V,E)\$ mit positiven Kantenkapazit{\"a}ten \$c_e\$ und Knotenmengen \$T_1,\ldots,T_N\$. 
Das Steinerbaumpackungs-Problem besteht darin, Kantenmengen \$S_1,\ldots,S_N\$ zu finden, so da\ss\ jedes \$S_k\$ die Knoten aus \$T_k\$ verbindet und jede Kante \$e\$ in h{\"o}chstens \$c_e\$ Kantenmengen aus \$S_1,\ldots,S_N\$ vorkommt. Eine zul{\"a}ssige L{\"o}sung dieses Problems nennen wir eine Steinerbaumpackung. Ist zus{\"a}tzlich eine Gewichtung der Kanten gegeben und nach einer bez{\"u}glich dieser Gewichtung minimalen Steinerbaumpackung gesucht, so sprechen wir vom gewichteten Steinerbaumpackungs-Problem. Die Motivation zum Studium dieses Problems kommt aus dem Entwurf elektronischer Schaltungen. Ein dort auftretendes Teilproblem ist das sogenannte Verdrahtungsproblem, das im wesentlichen darin besteht, gegebene Punktmengen unter bestimmten Nebenbedingungen und Optimalit{\"a}tskriterien auf einer Grundfl{\"a}che zu verbinden. Wir studieren das Steinerbaumpackungs-Problem aus polyedrischer Sicht und definieren ein Polyeder, dessen Ecken genau den Steinerbaumpackungen entsprechen. Anschlie\ss end versuchen wir, dieses Polyeder durch gute'' beziehungsweise facetten-definierenden Ungleichungen zu beschreiben. Basierend auf diesen Ungleichungen entwickeln wir ein Schnittebenenverfahren. Die L{\"o}sung des Schnittebenenverfahrens liefert eine untere Schranke f{\"u}r die Optimall{\"o}sung und dient als Grundlage f{\"u}r die Entwicklung guter Primalheuristiken. Wir haben das von uns implementierte Schnittebenenverfahren an einem Spezialfall des Verdrahtungsproblems, dem sogenannten Switchbox-Verdrahtungsproblem, getestet und vielversprechende Ergebnisse erzielt.}, language = {de} } @phdthesis{Harks, author = {Harks, Tobias}, title = {Multicommodity Routing Problems-Selfish Behavior and Online Aspects-}, isbn = {3867273596}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10426}, abstract = {In this thesis, we study multicommodity routing problems in networks, in which commodities have to be routed from source to destination nodes. Such problems model for instance the traffic flows in street networks, data flows in the Internet, or production flows in factories. In most of these applications, the quality of a flow depends on load dependent cost functions on the edges of the given network. The total cost of a flow is usually defined as the sum of the arc cost of the network. An optimal flow minimizes this cost. A main focus of this thesis is to investigate online multicommodity routing problems in networks, in which commodities have to be routed sequentially. Arcs are equipped with load dependent price functions defining routing costs, which have to be minimized. We discuss a greedy online algorithm that routes (fractionally) each commodity by minimizing a convex cost function that depends on the previously routed flow. We present a competitive analysis of this algorithm and prove upper bounds of (d+1)^(d+1) for polynomial price functions with nonnegative coefficients and maximum degree d. For networks with two nodes and parallel arcs, we show that this algorithm returns an optimal solution. Without restrictions on the price functions and network, no algorithm is competitive. We also investigate a variant in which the demands have to be routed unsplittably. In this case, it is NP-hard to compute the offline optimum. Furthermore, we study selfish routing problems (network games). In a network game, players route demand in a network with minimum cost. 
In this setting, we study the quality of Nash equilibria compared to the system optimum (price of anarchy) in network games with nonatomic and atomic players and splittable flow. As a main result, we prove upper bounds on the price of anarchy for polynomial latency functions with nonnegative coefficients and maximum degree d, which improve upon the previous best ones.}, language = {en} } @phdthesis{Zachow, author = {Zachow, Stefan}, title = {Computergest{\"u}tzte 3D Osteotomieplanung in der Mund-Kiefer-Gesichtschirurgie unter Ber{\"u}cksichtigung der r{\"a}umlichen Weichgewebeanordnung}, isbn = {3899631986}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10432}, abstract = {In der Arbeit wird die computergest{\"u}tzte Planung von chirurgisch gesetzten Knochenfrakturen bzw. Knochenschnitten (sogenannten Osteotomien) an dreidimensionalen, computergrafischen Sch{\"a}delmodellen, sowie die Umpositionierung separierter kn{\"o}cherner Segmente im Kontext der rekonstruktiven MKG-Chirurgie behandelt. Durch die 3D Modellierung und Visualisierung anatomischer Strukturen, sowie der 3D Osteotomie- und Umstellungsplanung unter Einbeziehung der resultierenden Weichgewebedeformation wird den Chirurgen ein Werkzeug an die Hand gegeben, mit dem eine Therapieplanung am Computer durchgef{\"u}hrt und diese in Hinblick auf Funktion und {\"A}sthetik bewertet werden kann. Unterschiedliche Strategien k{\"o}nnen dabei erprobt und in ihrer Auswirkung erfasst werden. Dazu wird ein methodischer Ansatz vorgestellt, der zum einen die chirurgische Planung im Vergleich zu existierenden Ans{\"a}tzen deutlich verbessert und zum anderen eine robuste Weichgewebeprognose, durch den Einsatz geeigneter Planungsmodelle und eines physikalisch basierten Weichgewebemodells unter Nutzung numerischer L{\"o}sungsverfahren in die Planung integriert. Die Visualisierung der Planungsergebnisse erlaubt sowohl eine anschauliche und {\"u}berzeugende, pr{\"a}operative Patientenaufkl{\"a}rung, als auch die Demonstration m{\"o}glicher Vorgehensweisen und deren Auswirkungen f{\"u}r die chirurgische Ausbildung. Ferner erg{\"a}nzen die Planungsdaten die Falldokumentation und liefern einen Beitrag zur Qualit{\"a}tssicherung. Die Arbeit ist in sieben Kapitel gegliedert und wie folgt strukturiert: Zuerst wird die medizinische Aufgabenstellung bei der chirurgischen Rekonstruktion von Knochenfehlbildungen und -fehlstellungen in der kraniofazialen Chirurgie sowie die daraus resultierenden Anforderungen an die Therapieplanung beschrieben. Anschließend folgt ein umfassender {\"U}berblick {\"u}ber entsprechende Vorarbeiten zur computergest{\"u}tzten Planung knochenverlagernder Operationen und eine kritische Bestandsaufnahme der noch vorhandenen Defizite. Nach der Vorstellung des eigenen Planungsansatzes wird die Generierung individueller, qualitativ hochwertiger 3D Planungsmodelle aus tomografischen Bilddaten beschrieben, die den Anforderungen an eine intuitive, 3D Planung von Umstellungsosteotomien entsprechen und eine Simulation der daraus resultierenden Weichgewebedeformation mittels der Finite-Elemente Methode (FEM) erm{\"o}glichen. Die Methoden der 3D Schnittplanung an computergrafischen Modellen werden analysiert und eine 3D Osteotomieplanung an polygonalen Sch{\"a}delmodellen entwickelt, die es erm{\"o}glicht, intuitiv durch Definition von Schnittlinien am 3D Knochenmodell, eine den chirurgischen Anforderungen entsprechende Schnittplanung unter Ber{\"u}cksichtigung von Risikostrukturen durchzuf{\"u}hren. 
Separierte Knochensegmente lassen sich im Anschluss interaktiv umpositionieren und die resultierende Gesamtanordnung hinsichtlich einer funktionellen Rehabilitation bewerten. Aufgrund des in dieser Arbeit gew{\"a}hlten, physikalisch basierten Modellierungsansatzes kann unter Ber{\"u}cksichtigung des gesamten Weichgewebevolumens aus der Knochenverlagerung direkt die resultierende Gesichtsform berechnet werden. Dies wird anhand von 13 exemplarischen Fallstudien anschaulich demonstriert, wobei die Prognosequalit{\"a}t mittels postoperativer Fotografien und postoperativer CT-Daten {\"u}berpr{\"u}ft und belegt wird. Die Arbeit wird mit einem Ausblick auf erweiterte Modellierungsans{\"a}tze und einem Konzept f{\"u}r eine integrierte, klinisch einsetzbare Planungsumgebung abgeschlossen.}, language = {de} } @phdthesis{Weider, author = {Weider, Steffen}, title = {Integration of Vehicle and Duty Scheduling in Public Transport}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-16240}, abstract = {This thesis describes the algorithm IS-OPT that integrates scheduling of vehicles and duties in public bus transit. IS-OPT is the first algorithm which solves integrated vehicle and duty scheduling problems arising in medium-sized carriers such that its solutions can be used in daily operations without further adaptations. This thesis is structured as follows: The first chapter highlights mathematical models of the planning process of public transit companies and examines their potential for integration with other planning steps. It also introduces descriptions of the vehicle and the duty scheduling problem. Chapter 2 motivates why it can be useful to integrate vehicle and duty scheduling, explains approaches from the literature, and gives an outline of our algorithm IS-OPT. The following chapters go into the details of the most important techniques and methods of IS-OPT: In Chapter 3 we describe how we use Lagrangean relaxation in a column generation framework. Next, in Chapter 4, we describe a variant of the proximal bundle method (PBM) that is used to approximate linear programs occurring in the solution process. We introduce here a new variant of the PBM which is able to utilize inexact function evaluations and epsilon-subgradients. We also show the convergence of this method under certain assumptions. Chapter 5 treats the generation of duties for the duty scheduling problem. This problem is modeled as a resource-constrained shortest path problem with non-linear side constraints and a nearly linear objective function. It is solved in a two-stage approach. First, we calculate lower bounds on the reduced costs of duties using certain nodes by a new inexact label-setting algorithm. Then we use these bounds to speed up a depth-first-search algorithm that finds feasible duties. In Chapter 6 we present the primal heuristic of IS-OPT that solves the integrated problem to integrality. We introduce a new branch-and-bound based heuristic which we call rapid branching. Rapid branching uses the proximal bundle method to compute lower bounds, it introduces a heuristic node selection scheme, and it utilizes a new branching rule that fixes sets of many variables at once. The common approach to solve the problems occurring in IS-OPT is to trade inexactness of the solutions for speed of the algorithms. This enables IS-OPT, as we show in Chapter 7, to solve large real-world integrated problems. 
The schedules produced by IS-OPT save up to 5\% of the vehicle and duty costs of existing schedules of regional and urban public transport companies.}, language = {en} } @phdthesis{Zymolka, author = {Zymolka, Adrian}, title = {Design of Survivable Optical Networks by Mathematical Optimization}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10408}, abstract = {The cost-efficient design of survivable optical telecommunication networks is the topic of this thesis. In cooperation with network operators, we have developed suitable concepts and mathematical optimization methods to solve this comprehensive planning task in practice. Optical technology is increasingly employed in modern telecommunication networks. Digital information is thereby transmitted as short light pulses through glass fibers. Moreover, the optical medium allows for simultaneous transmissions on a single fiber by use of different wavelengths. Recent optical switches enable a direct forwarding of optical channels in the network nodes without the previously required signal retransformation to electronics. Their integration creates ongoing optical connections, which are called lightpaths. We study the problem of finding cost-efficient configurations of optical networks which meet specified communication requirements. A configuration comprises the determination of all lightpaths to establish as well as the detailed allocation of all required devices and systems. We use a flexible modeling framework for a realistic representation of the networks and their composition. For different network architectures, we formulate integer linear programs which model the design task in detail. Moreover, network survivability is an important issue due to the immense bandwidths offered by optical technology. Operators therefore ask for designs which maintain protected connections and guarantee a defined minimum throughput in case of malfunctions. In order to achieve an effective realization of scalable protection, we present a novel survivability concept tailored to optical networks and integrate several variants into the models. Our solution approach is based on a suitable model decomposition into two subtasks which separates two individually hard subproblems and thus enables the computation of cost-efficient designs with a proven quality guarantee. The first subtask consists of routing the connections with corresponding dimensioning of capacities and constitutes a common core task in the area of network planning. Sophisticated methods for such problems have already been developed and are deployed by appropriate integration. The second subtask is characteristic of optical networks and seeks a conflict-free assignment of available wavelengths to the lightpaths using a minimum number of involved wavelength converters. For this coloring-like task, we derive particular models and study methods to estimate the number of unavoidable conversions. As a constructive approach, we develop heuristics and an exact branch-and-price algorithm. Finally, we carry out an extensive computational study on realistic data, provided by our industrial partners. 
As twofold purpose, we demonstrate the potential of our approach for computing good solutions with quality guarantee, and we exemplify its flexibility for application to network design and analysis.}, language = {en} } @phdthesis{Roeblitz, author = {R{\"o}blitz, Thomas}, title = {Co-Reservation of Resources in the Grid}, isbn = {978-3-86853-137-4}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11626}, abstract = {Executing applications in the Grid often requires access to multiple geographically distributed resources. In a Grid environment, these resources belong to different administrative domains, each employing its own scheduling policy. That is, at which time an activity (e.g., compute job, data transfer) is started, is decided by the resource's local management system. In such an environment, the coordinated execution of distributed applications requires guarantees on the quality of service (QoS) of the needed resources. Reserving resources in advance is an accepted means to obtain QoS guarantees from a single provider. The challenge, however, is to coordinate advance reservations of multiple resources. This work presents a system architecture and mechanisms to coordinate multiple advance reservations -- called co-reservations -- for delivering QoS guarantees to complex applications. We formally define the co-reservation problem as an optimization problem. The presented model supports three dimensions of freedom: the start time, the duration and the service level of a reservation. Requests and resources are described in a simple language. After matching the static properties and requirements of either side in a mapping, the reservation mechanism probes information about the future status of the resources. The versatile design of the probing step allows the efficient processing of requests, but also lets the resources express their preferences among the myriads of reservation candidates. Next, the best mapping is found through an implementation of the formal co-reservation model. Then, the mapping has to be secured, i.e., resources need to be allocated to a co-reservation candidate with all-or-nothing semantics. We study several goal-driven sequential and concurrent allocation mechanisms and define schemes for handling allocation failures. Finally, we introduce the concept of virtual resources for seamlessly embedding co-reservations into Grid resource management.}, language = {en} } @phdthesis{Tuchscherer, author = {Tuchscherer, Andreas}, title = {Local Evaluation of Policies for Discounted Markov Decision Problems}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11963}, school = {Freie Universit{\"a}t Berlin}, pages = {208}, abstract = {Providing realistic performance indicators of online algorithms for a given online optimization problem is a difficult task in general. Due to significant drawbacks of other concepts like competitive analysis, Markov decision problems (MDPs) may yield an attractive alternative whenever reasonable stochastic information about future requests is available. However, the number of states in MDPs emerging from real applications is usually exponential in the original input parameters. Therefore, the standard methods for analyzing policies, i.e., online algorithms in our context, are infeasible. In this thesis we propose a new computational tool to evaluate the behavior of policies for discounted MDPs locally, i.e., depending on a particular initial state. 
The method is based on a column generation algorithm for approximating the total expected discounted cost of an unknown optimal policy, a concrete policy, or a single action (which assumes actions at other states to be made according to an optimal policy). The algorithm determines an \$\varepsilon\$-approximation by inspecting only relatively small local parts of the total state space. We prove that the number of states required for providing the approximation is independent of the total number of states, which underlines the practicability of the algorithm. The approximations obtained by our algorithm are typically much better than the theoretical bounds obtained by other approaches. We investigate the pricing problem and the structure of the linear programs encountered in the column generation. Moreover, we propose and analyze different extensions of the basic algorithm in order to achieve good approximations fast. The potential of our analysis tool is exemplified for discounted MDPs emerging from different online optimization problems, namely online bin coloring, online target date assignment, and online elevator control. The results of the experiments are quite encouraging: our method is mostly capable to provide performance indicators for online algorithms that much better reflect observations made in simulations than competitive analysis does. Moreover, the analysis allows to reveal weaknesses of the considered online algorithms. This way, we developed a new online algorithm for the online bin coloring problem that outperforms existing ones in our analyses and simulations.}, language = {en} } @phdthesis{Burger2000, author = {Burger, Sven}, title = {Erzeugung und Untersuchung dunkler Solitonen in Bose-Einstein Kondensaten}, year = {2000}, language = {en} } @phdthesis{Ruprecht, author = {Ruprecht, Daniel}, title = {Analysis of a multi-scale asymptotic model for internal gravity waves in a moist atmosphere}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000018355-1}, abstract = {The thesis presents the analysis of a reduced model for modulation of internal gravity waves by deep convective clouds. The starting point for the derivation are conservation laws for mass, momentum and energy coupled with a bulk micro-physics model describing the evolution of mixing ratios of water vapor, cloud water and rain water. A reduced model for the identified scales of the regime is derived, using multi-scale asymptotics. The closure of the model employs conditional averaging over the horizontal scale of the convective clouds. The resulting reduced model is an extension of the anelastic equations, linearized around a constant background state, which are well-known from meteorology. The closure of the model is achieved purely by analytical means and involves no additional physically motivated assumptions. The essential new parameter arising from the coupling to a micro-physics model is the area fraction of saturated regions on the horizontal scale of the convective clouds. It turns out that this parameter is constant on the employed short timescale. Hence the clouds constitute a constant background, modulating the characteristics of propagation of internal waves. The model is then investigated by analytical as well as numerical means. 
Important results are, among others, that in the model moisture (i) inhibits propagation of internal waves by reducing the modulus of the group velocity, (ii) reduces the angle between the propagation direction of a wave-packet and the horizontal, (iii) causes critical layers and (iv) introduces a maximum horizontal wavelength beyond which waves are no longer propagating but become evanescent. The investigated examples of orographically generated gravity waves also feature a significant reduction of vertical momentum flux by moisture. The model is extended by assuming systematically small under-saturation, that is saturation at leading order. The closure is similar to the original case but requires additional assumptions. The saturated area fraction in the obtained model is no longer constant but now depends nonlinearly on vertical displacement and thus on vertical velocity.}, language = {en} } @phdthesis{Orlowski, author = {Orlowski, Sebastian}, title = {Optimal Design of Survivable Multi-layer Telecommunication Networks}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-11876}, abstract = {Telecommunication transport networks consist of a stack of technologically different subnetworks, so-called layers, which are strongly interdependent. For example, one layer may correspond to an Internet (IP) backbone network whose links are realized by lightpath connections in an underlying optical fiber layer. To ensure that the network can fulfill its task of routing all communication requests, the inter-layer dependencies have to be taken into account already in the planning phase of the network. This is particularly important with survivability constraints, where connections in one layer have to be protected against cable cuts or equipment failures in another layer. The traditional sequential planning approach where one layer is optimized after the other cannot properly take care of the inter-layer dependencies; this can only be achieved with an integrated planning of several network layers at the same time. This thesis provides mathematical models and algorithmic techniques for the integrated optimization of two network layers with survivability constraints. We describe a multi-layer network design problem which occurs in various technologies, and model it mathematically using mixed-integer programming (MIP) formulations. The presented models cover many important practical side constraints from different technological contexts. In contrast to previous models from the literature, they can be used to design large two-layer networks with survivability requirements. We discuss modeling alternatives for various aspects of a multi-layer network and compare different routing formulations under multi-layer survivability constraints. We solve our models using a branch-and-cut-and-price approach with various problemspecific enhancements. This includes a presolving technique based on linear programming to reduce the problem size, combinatorial and sub-MIP-based primal heuristics to compute feasible network configurations, cutting planes which take the multi-layer survivability constraints into account to improve the lower bound on the optimal network cost, and column generation to generate flow variables dynamically during the algorithm. We develop techniques to speed up computations in a Benders decomposition approach and compare this approach to the standard formulation with a single MIP. 
We use the developed techniques to design large survivable two-layer networks by means of linear and integer programming methods. On realistic test instances with up to 67 network nodes and survivability constraints, we investigate the algorithmic impact of our techniques and show how to use them to compute good network configurations with quality guarantees. Most of the smaller test instances with up to 17 nodes can be solved to near-optimality. Moreover, we can compute feasible solutions and dual bounds even for large networks with survivability constraints, which has not been possible before.}, language = {en} } @phdthesis{Zschiedrich, author = {Zschiedrich, Lin}, title = {Transparent boundary conditions for Maxwell's equations: Numerical concepts beyond the PML method}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000013994-8}, abstract = {Optical technologies are ubiquitously used in hi-tech devices. As a common feature of such devices one finds structures with dimensions in the order of the wavelength of the used light. To design and produce such devices, the wave nature of light must be taken into account. Accordingly, robust simulation tools are required which are based on rigorously solving Maxwell's equations, the governing equations of light propagation within macroscopic media. This thesis contributes to the modeling and the numerical computation of light scattering problems: Light scattering problems are typically posed on the entire space. The Perfectly-Matched -Layer method (PML) is widely used to restrict the simulation problem onto a bounded computational domain. We propose an adaptive PML method which exhibits a good convergence even for critical problems where standard PML implementations fail. Besides the computation of the near field, that is the electromagnetic field within the computational domain, it is of major interest to evaluate the electromagnetic field in the exterior domain and to compute the far field. So far, this was numerically only possible for simple geometries such as homogeneous exterior domains or layered media. To deal with more complicated devices, for example with waveguide inhomogeneities, we develop an evaluation formula based on the PML solution which allows for an exterior domain field evaluation in a half space above the device. Finally, we generalize the PML method to problems with multiply structured exterior domains. The term "multiply structured exterior domain" is defined in this thesis and means that the exterior domain exhibits several half-infinite structures. Mathematically, this gives rise to various complications. For example, no analytical solutions to Maxwell's equations for standard light sources are available in the exterior domain, which are needed to describe the incoming field in a light scattering problem. To tackle this we propose a new light scattering problem formulation which fits well into the PML method framework and which may be regarded as an extension of classical contributions by Sommerfeld, Wiener and Hopf. 
An exterior domain evaluation formula for multiply structured exterior domains with an extended illumination is derived as well.}, language = {en} } @phdthesis{Schmidt2002, author = {Schmidt, Frank}, title = {Solution of Interior-Exterior Helmholtz-Type Problems Based on the Pole Condition Concept}, year = {2002}, language = {en} } @phdthesis{Schmidt1989, author = {Schmidt, Frank}, title = {Ingenieurm{\"a}ßige Berechnung der Feldausbreitung in optischen Bauelementen der Lichtwellenleitertechnik}, year = {1989}, language = {en} } @phdthesis{Reinecke2009, author = {Reinecke, Isabel}, title = {Mathematical modeling and simulation of the female menstrual cycle}, year = {2009}, language = {en} } @phdthesis{Schintke2010, author = {Schintke, Florian}, title = {Management verteilter Daten in Grid- und Peer-to-Peer-Systemen}, year = {2010}, language = {en} } @phdthesis{Schuett2010, author = {Sch{\"u}tt, Thorsten}, title = {Range queries in distributed hash tables}, year = {2010}, language = {en} } @phdthesis{Deuflhard1968, author = {Deuflhard, Peter}, title = {Stimulierte Brillouinstreuung in Germanium bei 10.6 μ}, address = {Physics Dept.}, year = {1968}, language = {en} } @phdthesis{Deuflhard1972, author = {Deuflhard, Peter}, title = {Ein Newton-Verfahren bei fastsingul{\"a}rer Funktionalmatrix zur L{\"o}sung von nichtlinearen Randwertaufgaben mit der Mehrzielmethode}, address = {Math. Institute University of Cologne}, year = {1972}, language = {en} } @phdthesis{Deuflhard1976, author = {Deuflhard, Peter}, title = {A Stepsize Control for Continuation Methods with Special Application to Multiple Shooting Techniques}, address = {Math. Institute, Munich University of Technology}, year = {1976}, language = {en} } @phdthesis{Klapproth2012, author = {Klapproth, Corinna}, title = {Adaptive numerical integration for dynamical contact problems}, year = {2012}, language = {en} } @phdthesis{Gladilin2003, author = {Gladilin, Evgeny}, title = {Biomechanical Modeling of Soft Tissue and Facial Expressions for Craniofacial Surgery Planning}, address = {Germany}, year = {2003}, language = {en} } @phdthesis{Schiela2006, author = {Schiela, Anton}, title = {The Control Reduced Interior Point Method. 
A Function Space Oriented Algorithmic Approach}, address = {Fachbereich Mathematik}, year = {2006}, language = {en} } @phdthesis{Sobe2013, author = {Sobe, Kathrin}, title = {Consistency and Fault Tolerance of Distributed Storage Systems}, year = {2013}, language = {en} } @phdthesis{Stender2013, author = {Stender, Jan}, title = {Snapshots in Large-Scale Distributed File Systems}, year = {2013}, language = {en} } @phdthesis{Hoegqvist2012, author = {H{\"o}gqvist, Mikael}, title = {Consistent Key-Based Routing in Decentralized and Reconfigurable Data Services}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:11-100206145}, year = {2012}, language = {en} } @phdthesis{Kolbeck2012, author = {Kolbeck, Bj{\"o}rn}, title = {A fault-tolerant and scalable protocol for replication in distributed file systems}, year = {2012}, language = {en} } @phdthesis{Werner2009, author = {Werner, Axel}, title = {Linear constraints on face numbers of polytopes}, year = {2009}, language = {en} } @phdthesis{Stephan2009, author = {Stephan, R{\"u}diger}, title = {Polyhedral aspects of cardinality constrained combinatorial optimization problems}, year = {2009}, language = {en} } @phdthesis{Heismann, author = {Heismann, Olga}, title = {The Hypergraph Assignment Problem}, abstract = {This thesis deals with the hypergraph assignment problem (HAP), a set partitioning problem in a special type of hypergraph. The HAP generalizes the assignment problem from bipartite graphs to what we call bipartite hypergraphs, and is motivated by applications in railway vehicle rotation planning. The main contributions of this thesis concern complexity, polyhedral results, analyses of random instances, and primal methods for the HAP. We prove that the HAP is NP-hard and APX-hard even for small hyperedge sizes and hypergraphs with a special partitioned structure. We also study the complexity of the set packing and covering relaxations of the HAP, and present for certain cases polynomial exact or approximation algorithms. A complete linear description is known for the assignment problem. We therefore also study the HAP polytope. There, we have a huge number of facet-defining inequalities already for a very small problem size. We describe a method for dividing the inequalities into equivalence classes without resorting to a normal form. Within each class, facets are related by certain symmetries and it is sufficient to list one representative of each class to give a complete picture of the structural properties of the polytope. We propose the algorithm "HUHFA" for the classification that is applicable not only to the HAP but combinatorial optimization problems involving symmetries in general. In the largest possible HAP instance for which we could calculate the complete linear description, we have 14049 facets, which can be divided into 30 symmetry classes. We can combinatorially interpret 16 of these classes. This is possible by employing cliques to generalize the odd set inequalities for the matching problem. The resulting inequalities are valid for the polytope associated with the set packing problem in arbitrary hypergraphs and have a clear combinatorial meaning. An analysis of random instances provides a better insight into the structure of hyperassignments. Previous work has extensively analyzed random instances for the assignment problem theoretically and practically. 
As a generalization of these results for the HAP, we prove bounds on the expected value of a minimum-cost hyperassignment that uses half of the maximum possible number of hyperedges that are not edges. In a certain complete partitioned hypergraph G2,2n with i.i.d. exponential random variables with mean 1 as hyperedge costs, it lies between 0.3718 and 1.8310 as the vertex number tends to infinity. Finally, we develop an exact combinatorial solution algorithm for the HAP that combines three methods: a very large-scale neighborhood search, the composite columns method for the set partitioning problem, and the network simplex algorithm.}, language = {en} } @phdthesis{Stoetzel, author = {St{\"o}tzel, Claudia}, title = {Numerical and Discrete Modeling of Reproductive Endocrinological Networks}, abstract = {This thesis deals with the mathematical modeling of the endocrinological networks that underlie the female hormone cycle. These networks consist of a variety of biological mechanisms in different parts of the organism. Their interaction leads to periodic changes of various substances that are necessary for reproduction. In every cycle, hormones are secreted from the hypothalamic-pituitary-gonadal axis into the bloodstream, where they distribute and influence several functions in the body. Their most important task in reproduction is to regulate processes in the ovaries, where follicles and the corpus luteum develop. These produce steroids that are released into the blood and from there regulate the processes in the hypothalamic-pituitary-gonadal axis. The hormonal cycle is thus the result of a large feedback loop, whose self-regulation is a complex interplay of multiple components. For the modeling of these processes, a high abstraction level is required, which can be realized by various modeling approaches. In this work, some of these approaches are implemented. The first step in all approaches is the representation of the most important mechanisms in a flowchart. In the next step, this can be implemented as a system of ordinary differential equations using Hill functions, as a piecewise defined affine differential equation model, or directly as a purely regulatory model. Using this approach, a differential equation model for the hormonal cycle of cows is developed. This is compared with a more advanced model of the menstrual cycle in humans. Both models are validated by comparing simulations with measured values, and by studying external influences such as drug administration. For the example of the bovine estrous cycle, continuous analysis methods are used to investigate stability, follicular wave patterns, and robustness with respect to parameter perturbations. Furthermore, the model is substantially reduced while preserving the main simulation results. To take a look at alternative modeling approaches, corresponding discrete models are derived, exemplified for the bovine model. For a piecewise affine version of the model, parameter constraints for the continuous model are calculated. Stability is analyzed globally for a purely discrete model.
In addition, core discrete models are derived, which retain the dynamic properties of the original model.}, language = {en} } @phdthesis{Weinkauf2008, author = {Weinkauf, Tino}, title = {Extraction of Topological Structures in 2D and 3D Vector Fields}, year = {2008}, language = {en} } @phdthesis{SchmidtEhrenberg2008, author = {Schmidt-Ehrenberg, Johannes}, title = {Analysis and Visualization of Molecular Conformations}, year = {2008}, language = {en} } @phdthesis{Zachow2005, author = {Zachow, Stefan}, title = {Computer assisted osteotomy planning in cranio-maxillofacial surgery under consideration of facial soft tissue changes}, year = {2005}, language = {en} } @phdthesis{Benger2004, author = {Benger, Werner}, title = {Visualization of General Relativistic Tensor Fields via a Fiber Bundle Data Model}, year = {2004}, language = {en} } @phdthesis{neeZoeckler2003, author = {(n{\'e}e Z{\"o}ckler), Malte}, title = {Efficient Visualization and Reconstruction of 3D Geometric from Neuro-Biological Confocal Microscope Scans}, year = {2003}, language = {en} } @phdthesis{Kasten2012, author = {Kasten, Jens}, title = {Lagrangian feature extraction in two-dimensional unsteady flows}, year = {2012}, language = {en} } @phdthesis{Reininghaus2012, author = {Reininghaus, Jan}, title = {Computational discrete Morse theory}, year = {2012}, language = {en} } @phdthesis{Sahner2009, author = {Sahner, Jan}, title = {Extraction of Vortex Structures in 3D Flow Fields}, year = {2009}, language = {en} } @phdthesis{Pomplun, author = {Pomplun, Jan}, title = {Reduced basis method for electromagnetic scattering problems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000017571-3}, language = {en} } @phdthesis{Kettner, author = {Kettner, Benjamin}, title = {Detection of spurious modes in resonance mode computations}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000038256-7}, language = {en} } @phdthesis{Lockau, author = {Lockau, Daniel}, title = {Optical modeling of thin film silicon solar cells with random and periodic light management textures}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-38759}, language = {en} } @phdthesis{Weber, author = {Weber, Britta}, title = {Reconstruction of Microtubule Centerlines from Electron Tomograms}, abstract = {The organization of the mitotic spindle, a structure that separates the chromosomes during cell division, is an active research topic in molecular cell biology. It is composed of microtubules, elongated tubular macromolecules with a diameter of 25 nm. The only volumetric imaging technique that is available to a wide community and provides the required resolution to capture details about microtubules is electron tomography. However, the automatic detection of microtubules in electron tomograms is a difficult task due to the low contrast of the data. Furthermore, thick samples have to be cut into 300 nm thin sections before electron tomography can be applied. Software for automatic segmentation and stitching of the microtubules is not available, and therefore these tasks have to be performed manually. Unfortunately, manual segmentation is time-consuming for large samples, and manual stitching of the tomograms is often infeasible due to the lack of prominent features for registration. Conclusions drawn from electron tomographic data are currently mostly based on either small samples containing few microtubules or single sections of complex structures.
Consequently, simple properties, such as the length of microtubules in the spindle or their number, are still unknown for most model organisms. In this thesis, we present methods for 1) an automatic segmentation of microtubule centerlines in electron tomograms, and 2) an automatic stitching of the lines extracted from serial sections. For the centerline segmentation, we use 3D template matching and exploit knowledge about the shape of microtubules and microscopy artifacts to design the templates. For the registration of the lines, we present a way to model the orientation of lines as a mixture of Fisher-Mises distributions, where we estimate the transformation parameters with the expectation maximization algorithm. The final line matching problem is formulated in terms of a probabilistic graphical model. To find the correct correspondences of line ends, we use belief propagation. We handle the poor convergence properties of this algorithm by detecting ambiguous and conflicting assignments of lines automatically. An expert can then influence the final output of the algorithm by solving conflicts manually. A detailed error analysis on real biological data and an assessment of the reliability of the results are the prerequisite for analyzing the resulting line representations of the microtubules. To this end, the developed workflow for segmenting and stitching of microtubule centerlines is evaluated on plastic-embedded samples of C. elegans early embryos and of spindles from X. laevis egg extracts. Our results suggest that the output of the presented algorithms, together with little manual correction, is of sufficient quality to allow a detailed analysis of dense microtubule networks. Finally, we show exemplary results for the centrosome of a C. elegans mitotic spindle.}, language = {en} } @phdthesis{Poethkow, author = {P{\"o}thkow, Kai}, title = {Modeling, Quantification and Visualization of Probabilistic Features in Fields with Uncertainties}, abstract = {A fundamental property of scientific data is that the true value of a quantity cannot be determined with arbitrary precision. It is only possible to bound it by intervals or to characterize the uncertainty by a probability distribution. This holds for all real-valued data, for measurements as well as for simulation results. Examples are measurements of fundamental physical quantities such as velocity, or long-term temperature predictions computed by climate models. The uncertainty of results is an important piece of information that is frequently indicated in the natural and engineering sciences by confidence intervals in 1D plots and tables. In contrast, it has so far been largely impossible to represent data uncertainty when visualizing 2D and 3D data with standard methods. This thesis presents probabilistically founded methods that enable the analysis and visualization of scalar, vector, and tensor fields with uncertainties. The focus lies on the extraction of spatio-temporal geometric and topological features from the fields (e.g., isocontours and critical points). We use parametric and non-parametric random fields to model variability and spatial correlation mathematically. The probability distributions are estimated from ensemble data sets that aggregate several simulation results (e.g.
based on varying simulation parameters). We investigate the condition numbers of feature extraction methods in order to estimate the sensitivity, i.e., the amplification or attenuation of the uncertainty of the results relative to uncertainties in the input data. We present a general approach for probabilistic feature extraction that forms the basis for the computation of spatial probability distributions of various features in scalar, vector, and tensor fields. In this framework, probabilities for the existence of features are computed from local marginal distributions and formal feature definitions. Numerically, the probabilities can be determined by Monte Carlo integration. To avoid the high computational cost of this approach, we propose fast computation methods in which feature probabilities are estimated approximately by means of surrogate functions or lookup tables. The proposed methods are evaluated qualitatively and quantitatively on data from climate and biofluid mechanics simulations as well as from medical imaging.}, language = {en} } @phdthesis{Lubkoll, author = {Lubkoll, Lars}, title = {An Optimal Control Approach to Implant Shape Design: Modeling, Analysis and Numerics}, pages = {210}, abstract = {Facial trauma or congenital malformation of bones of the skull may degrade both skeletal integrity and the esthetic appearance. For the attending surgeon, a prediction of the esthetic outcome of a bone replacement or augmentation implant insertion is challenging. Therefore, it would be advantageous if we were able to compute an implant shape from a given desired outcome. This task is the main focus of this thesis. Besides the development of a model for the implant shape design problem, this work is concerned with the efficient solution and optimization of realistic models. This includes recent material laws for different soft tissue types as well as complex geometries obtained from medical image data. The implant shape design problem can be described as an optimal control problem with constraints given by the necessary optimality conditions in polyconvex hyperelasticity with nonlinear pressure-type boundary conditions. Important theoretical results, such as existence of solutions and higher regularity, are currently not available for such problems. Based on the existence result for polyconvex material laws, existence of solutions of the nonconvex optimal control problem is proven for the case of a simpler Neumann boundary condition. Due to the "impossible convexity" and the high nonlinearity of hyperelastic material laws, the numerical solution of the arising problems is difficult. In this regard, an affine covariant composite step method for nonconvex, equality-constrained optimization is presented. The corresponding globalization strategy is based on the affine covariant Newton method for underdetermined systems and cubic regularization methods for unconstrained optimization problems. The linear systems arising from the discretization of constrained optimization problems are described by saddle point matrices. The efficient solution of these systems by conjugate gradient methods for convex and nonconvex problems is discussed. Moreover, an error estimator that fits into the affine covariant setting is presented.
The presented composite step method was implemented in the C++ finite element library Kaskade 7. The performance of the algorithm is demonstrated on several examples. In addition to simple optimization problems, with the admissible set given through models of linear and nonlinear heat transfer, we give four examples with nonconvex, hyperelastic constraints.}, language = {en} } @phdthesis{Gupta, author = {Gupta, Pooja}, title = {Integrating high-density marker information into the genetic evaluation of the honey bee}, language = {en} } @phdthesis{Hammerschmidt, author = {Hammerschmidt, Martin}, title = {Optical simulation of complex nanostructured solar cells with a reduced basis method}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000102429-1}, pages = {XX, 169}, abstract = {Simulations of optical processes and complex nanostructured devices have become omnipresent in recent years in several fields of current research and industrial applications, not limited to the field of photovoltaics. Devices or processes are optimized with respect to a certain objective, where the underlying physical processes are described by partial differential equations. In photovoltaics and photonics, the investigated electromagnetic fields are governed by Maxwell's equations. In this thesis, a reduced basis method for the solution of the parameter-dependent electromagnetic scattering problem with arbitrary parameters is developed. The method is developed with the specific challenges arising in optical simulations of thin-film silicon solar cells in mind. These are large in domain size and have a complex three-dimensional structure, making optimization tasks infeasible if high accuracy of the electromagnetic field solution is required. The application of the empirical interpolation method makes it possible to expand an arbitrary parameter dependence affinely. Thus, not only geometries but also material tensors and source fields can be parameterized. Additionally, the required non-linear post-processing steps of the electromagnetic field to derive energy fluxes or volume absorption are addressed. The reduced basis method makes it possible to reduce the computational costs by orders of magnitude compared to efficient finite element solvers. In addition, an efficient tailored domain decomposition algorithm is presented to model incoherent layers or illuminations in optical systems efficiently. This is of particular interest for solar cells in superstrate configuration, where the absorber is illuminated through a glass substrate. The developed methods are employed in application examples taken from collaborations with experimentalists active in the joint lab "BerOSE" (Berlin Joint Lab for Optical Simulations for Energy Research). The optical model of a thin-film silicon multi-junction with incoherent light-trapping is characterized in great detail. The computational gains through hybrid, hp adaptive finite elements are studied, and the incoherent domain decomposition algorithm is applied to model a more realistic light-trapping by the glass substrate. The numerical examples of a hexagonal nano-hole array and a multi-junction silicon solar cell with a tunable intermediate reflector layer show that the reduced basis method is well suited as a forward solver for modeling and optimization tasks arising in photovoltaics and photonics. Reduced models for illumination and geometric parameters are built, providing up to five orders of magnitude savings in computational costs.
Resonance phenomena present in the nano-hole array example are detected and the model adapts itself automatically.}, language = {en} } @phdthesis{Lindow, author = {Lindow, Norbert}, title = {Visual Analysis of Atomic Structures Based on the Hard-Sphere Model}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-63190}, abstract = {Visualization and analysis of atomic compositions are essential to understand the structure and functionality of molecules. There is a wide range of applications, from fundamental research in biophysics and materials science to drug development in pharmaceutics. For most applications, the hard-sphere model is the most frequently used molecular model. Although the model is a rather simple approximation of reality, it enables investigating important physical properties in a purely geometrical manner. Furthermore, large data sets with thousands to millions of atoms can be visualized and analyzed. In addition to an adequate and efficient visualization of the data, the extraction of important structures plays a major role. For the investigation of biomolecules, such as proteins, the analysis of cavities and their dynamics is of particular interest. Substrates can bind in cavities, thereby inducing changes in the function of the protein. Another example is the transport of substrates through membrane proteins by the dynamics of the cavities. For both the visualization and the analysis of cavities, the following contributions are presented in this thesis: 1. The rendering of smooth molecular surfaces for the analysis of cavities is accelerated and visually improved, which makes it possible to show dynamic proteins. On the other hand, techniques are proposed to interactively render large static biological structures and inorganic materials up to atomic resolution for the first time. 2. A Voronoi-based method is presented to extract molecular cavities. The procedure achieves high geometrical accuracy with a comparatively fast computation time. Additionally, new methods are presented to visualize and highlight the cavities within the molecular structure. In a further step, the techniques are extended to dynamic molecular data to trace cavities over time and visualize topological changes. 3. To further improve the accuracy of the approaches mentioned above, a new molecular surface model is presented that shows the accessibility of a substrate. For the first time, the structure and dynamics of the substrate as a hard-sphere model are considered for the accessibility computation. In addition to the definition of the surface, an efficient algorithm for its computation is proposed, which additionally allows extracting cavities. The presented algorithms are demonstrated on different molecular data sets.
The data sets are either the result of physical or biological experiments or molecular dynamics simulations.}, language = {en} } @phdthesis{Badowski, author = {Badowski, Tomasz}, title = {Adaptive importance sampling via minimization of estimators of cross-entropy, mean square and inefficiency constants}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000102823-3}, language = {en} } @phdthesis{Gul, author = {Gul, Raheem}, title = {Mathematical Modeling and Sensitivity Analysis of Lumped­ Parameter Model of the Human Cardiovascular System}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000101271-2}, language = {en} } @phdthesis{Lie, author = {Lie, Han Cheng}, title = {On a strongly convex approximation of a stochastic optimal control problem for importance sampling of metastable diffusions}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000101680-4}, language = {en} } @phdthesis{Sriwattanaworachai, author = {Sriwattanaworachai, Nisara}, title = {Spectral approach to metastability of non-reversible complex processes}, language = {en} } @phdthesis{Vega, author = {Vega, Iliusi}, title = {Reconstruction and analysis of the state space for the identification of dynamical states in real-world time-series}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000104520-1}, language = {en} } @phdthesis{Agarwal, author = {Agarwal, Animesh}, title = {Path Integral Techniques in Molecular Dynamics Simulations of Open Boundary Systems}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000103020-4}, language = {en} } @phdthesis{Manley, author = {Manley, Phillip}, title = {Simulation of Plasmonic Nanoparticles in Thin Film Solar Cells}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000102531-2}, language = {en} } @phdthesis{Karbstein, author = {Karbstein, Marika}, title = {Line Planning and Connectivity}, isbn = {978-3-8439-1062-0}, abstract = {This thesis introduces the Steiner connectivity problem. It is a generalization of the well known Steiner tree problem. Given a graph G = (V, E) and a subset T ⊆ V of the nodes, the Steiner tree problem consists in finding a cost minimal set of edges connecting all nodes in T . The Steiner connectivity problem chooses, instead of edges, from a given set of paths a subset to connect all nodes in T . We show in the first part of this thesis that main results about complexity, approximation, integer programming formulations, and polyhedra can be generalized from the Steiner tree problem to the Steiner connectivity problem. An example for a straightforward generalization are the Steiner partition inequalities, a fundamental class of facet defining inequalities for the Steiner tree problem. They can be defined for the Steiner connectivity problem in an analogous way as for the Steiner tree problem. An example for a generalization that needs more effort is the definition of a directed cut formulation and the proof that this dominates the canonical undirected cut formulation enriched by all Steiner partition inequalities. For the Steiner connectivity problem this directed cut formulation leads to extended formulations, a concept that is not necessary for the Steiner tree problem. There are also major differences between both problems. For instance, the case T = V for the Steiner connectivity problem is equivalent to a set covering problem and, hence, not a polynomial solvable case as in the Steiner tree problem. 
The Steiner connectivity problem is not only an interesting generalization of the Steiner tree problem but also the underlying connectivity problem in line planning with integrated passenger routing. The integrated line planning and passenger routing problem is an important planning problem in service design of public transport and the topic of the second part. We are given the infrastructure network of a public transport system, where the edges correspond to streets and tracks and the nodes correspond to stations/stops of lines. The task is to find paths in the infrastructure network for lines and passengers such that the capacities of the lines suffice to transport all passengers. Existing models in the literature that integrate a passenger routing in line planning either treat transfers in a rudimentary way and, hence, neglect an important aspect for the choice of the passenger routes, or they treat transfers in too comprehensive a way and cannot be solved for large-scale real-world problems. We propose a new model that focuses on direct connections. The attractiveness of transfer-free connections is increased by introducing a transfer penalty for each non-direct connection. In this way, a passenger routing is computed that favors direct connections. For the computation of this model we also implemented algorithms influenced by the results for the Steiner connectivity problem. With our model we can compute good solutions that minimize a weighted sum of line operating costs and passenger travel times. These solutions improve the solutions of an existing approach that does not consider direct connections by up to 17\%. In contrast to a comprehensive approach that considers every transfer, and for which we could not even solve the root LP within 10 hours for large instances, the solutions of the new model, computed in the same time, are close to optimality (<1\%) or even optimal for real-world instances. In a project with the Verkehr in Potsdam GmbH to compute the line plan for 2010, we showed that our approach is applicable in practice and can be used to solve real-world problems.}, language = {en} } @phdthesis{Bujotzek, author = {Bujotzek, Alexander}, title = {Molecular Simulation of Multivalent Ligand-Receptor Systems}, language = {en} } @phdthesis{Winkelmann, author = {Winkelmann, Stefanie}, title = {Markov Decision Processes with Information Costs}, language = {en} } @phdthesis{Aiche, author = {Aiche, Stephan}, title = {Inferring Proteolytic Processes from Mass Spectrometry Time Series Data}, language = {en} } @phdthesis{Yousef, author = {Yousef, Kaveh}, title = {Stress responses in Escherichia coli and HIV as model systems of adaptation to the environment}, language = {en} } @phdthesis{Cardonha, author = {Cardonha, Carlos}, title = {Applied Methods for the Vehicle Positioning Problem}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-34211}, abstract = {This dissertation is dedicated to the Vehicle Positioning Problem (VPP), a classical combinatorial optimization problem in public transport in which vehicles should be assigned to parking positions in a depot in such a way that shunting moves are minimized. We investigate several models and solution methods to solve the VPP and the VPPp, a multi-periodic extension of the problem which was not previously studied.
In the first part of the thesis, the basic version of the problem is introduced and several formulations, theoretical properties, and concepts are investigated. In particular, we propose a mixed integer quadratic constrained formulation of the VPP whose QP relaxation produces the first known nontrivial lower bound on the number of shunting moves. The second part of our work describes two advanced solution methods. In the first approach, a set partitioning formulation is solved by a branch-and-price framework. We present efficient algorithms for the pricing problem and in order to improve the performance of the framework, we introduce heuristics and discuss strategies to reduce symmetry. The second approach consists of an iterative technique in which we try to optimize an ILP by solving some of its projections, which are smaller and therefore easier to compute. Both techniques are able to produce satisfactory solutions for large-scale instances of the VPPp. In the third part, advanced aspects of the problem are investigated. We propose and analyze several solution methods for the VPP+ and for the VPPp+, which are extended and more challenging versions of the VPP and of the VPPp, respectively. Finally, the role of uncertainty in the problem is discussed. In particular, we introduce a new criteria to evaluate the robustness of assignment plans, a formulation based on this concept, and a new online algorithm for the VPP.}, language = {en} } @phdthesis{Hoang, author = {Hoang, Nam-Dung}, title = {Algorithmic Cost Allocation Games: Theory and Applications}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-28185}, language = {en} } @phdthesis{Schweiger, author = {Schweiger, Jonas}, title = {Exploiting structure in non-convex quadratic optimization and gas network planning under uncertainty}, pages = {411}, abstract = {The amazing success of computational mathematical optimization over the last decades has been driven more by insights into mathematical structures than by the advance of computing technology. In this vein, we address applications, where nonconvexity in the model and uncertainty in the data pose principal difficulties. The first part of the thesis deals with non-convex quadratic programs. Branch\&Bound methods for this problem class depend on tight relaxations. We contribute in several ways: First, we establish a new way to handle missing linearization variables in the well-known Reformulation-Linearization-Technique (RLT). This is implemented into the commercial software CPLEX. Second, we study the optimization of a quadratic objective over the standard simplex or a knapsack constraint. These basic structures appear as part of many complex models. Exploiting connections to the maximum clique problem and RLT, we derive new valid inequalities. Using exact and heuristic separation methods, we demonstrate the impact of the new inequalities on the relaxation and the global optimization of these problems. Third, we strengthen the state-of-the-art relaxation for the pooling problem, a well-known non-convex quadratic problem, which is, for example, relevant in the petrochemical industry. We propose a novel relaxation that captures the essential non-convex structure of the problem but is small enough for an in-depth study. We provide a complete inner description in terms of the extreme points as well as an outer description in terms of inequalities defining its convex hull (which is not a polyhedron). 
We show that the resulting valid convex inequalities significantly strengthen the standard relaxation of the pooling problem. The second part of this thesis focuses on a common challenge in real world applications, namely, the uncertainty entailed in the input data. We study the extension of a gas transport network, e.g., from our project partner Open Grid Europe GmbH. For a single scenario this maps to a challenging non-convex MINLP. As the future transport patterns are highly uncertain, we propose a robust model to best prepare the network operator for an array of scenarios. We develop a custom decomposition approach that makes use of the hierarchical structure of network extensions and the loose coupling between the scenarios. The algorithm used the single-scenario problem as black-box subproblem allowing the generalization of our approach to problems with the same structure. The scenario-expanded version of this problem is out of reach for today's general-purpose MINLP solvers. Yet our approach provides primal and dual bounds for instances with up to 256 scenarios and solves many of them to optimality. Extensive computational studies show the impact of our work.}, language = {en} } @phdthesis{DjurdjevacConrad, author = {Djurdjevac Conrad, Natasa}, title = {Methods for analyzing complex networks using random walker}, language = {en} } @phdthesis{Hupfeld2009, author = {Hupfeld, Felix}, title = {Causal weak-consistency replication - a systems approach}, url = {http://nbn-resolving.de/nbn:de:kobv:11-100100235}, year = {2009}, language = {en} } @phdthesis{Weber2006, author = {Weber, Marcus}, title = {Meshless Methods in Conformation Dynamics}, year = {2006}, language = {en} } @phdthesis{Koch2004, author = {Koch, Thorsten}, title = {Rapid Mathematical Programming}, year = {2004}, language = {en} } @phdthesis{Hiller2009, author = {Hiller, Benjamin}, title = {Online Optimization}, year = {2009}, language = {en} } @phdthesis{Borndoerfer1998, author = {Bornd{\"o}rfer, Ralf}, title = {Aspects of Set Packing, Partitioning, and Covering}, year = {1998}, language = {en} } @phdthesis{Nielsen, author = {Nielsen, Adam}, title = {Computation Schemes for Transfer Operators}, language = {en} } @phdthesis{Gleixner, author = {Gleixner, Ambros}, title = {Exact and Fast Algorithms for Mixed-Integer Nonlinear Programming}, publisher = {Logos Verlag Berlin}, isbn = {978-3-8325-4190-3}, pages = {341}, abstract = {Mixed-integer nonlinear programming (MINLP) comprises the broad class of finite-dimensional mathematical optimization problems from mixed-integer linear programming and global optimization. The combination of the two disciplines allows us to construct more accurate models of real-world systems, while at the same time it increases the algorithmic challenges that come with solving them. This thesis presents new methods that improve the numerical reliability and the computational performance of global MINLP solvers. Since state-of-the-art algorithms for nonconvex MINLP fundamentally rely on solving linear programming (LP) relaxations, we address numerical accuracy directly for LP by means of LP iterative refinement: a new algorithm to solve linear programs to arbitrarily high levels of precision. The thesis is supplemented by an exact extension of the LP solver SoPlex, which proves on average 1.85 to 3 times faster than current state-of-the-art software for solving general linear programs exactly over the rational numbers. These methods can be generalized to quadratic programming. 
We study their application to numerically difficult multiscale LP models for metabolic networks in systems biology. To improve the computational performance of LP-based MINLP solvers, we show how the expensive, but effective, bound-tightening technique called optimization-based bound tightening can be approximated more efficiently via feasibility-based bound tightening. The resulting implementation increases the number of instances that can be solved and reduces the average running time of the MINLP solver SCIP by 17-19\% on hard mixed-integer nonlinear programs. Last, we present branching rules that exploit the presence of nonlinear integer variables, i.e., variables both contained in nonlinear terms and required to be integral. The new branching rules prefer integer variables when performing spatial branching, and favor variables in nonlinear terms when resolving integer infeasibility. They reduce the average running time of SCIP by 17\% on affected instances. Most importantly, all of the new methods enable us to solve problems which could not be solved before, either due to their numerical complexity or because of limited computing resources.}, language = {en} } @phdthesis{Schaefer, author = {Sch{\"a}fer, Patrick}, title = {Scalable Time Series Similarity Search for Data Analytics}, language = {en} } @phdthesis{Kainmueller, author = {Kainm{\"u}ller, Dagmar}, title = {Deformable Meshes for Accurate Automatic Segmentation of Medical Image Data}, language = {en} } @phdthesis{Raack, author = {Raack, Christian}, title = {Capacitated Network Design - Multi-Commodity Flow Formulations, Cutting Planes, and Demand Uncertainty}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-36167}, school = {Technische Universit{\"a}t Berlin}, abstract = {In this thesis, we develop methods in mathematical optimization to dimension networks at minimal cost. Given hardware and cost models, the challenge is to provide network topologies and efficient capacity plans that meet the demand for network traffic (data, passengers, freight). We incorporate crucial aspects of practical interest such as the discrete structure of available capacities as well as the uncertainty of demand forecasts. The considered planning problems typically arise in the strategic design of telecommunication or public transport networks and also in logistics. One of the essential aspects studied in this work is the use of cutting planes to enhance solution approaches based on multi-commodity flow formulations. Providing theoretical and computational evidence for the efficacy of inequalities based on network cuts, we extend existing theory and algorithmic work in different directions. First, we prove that special-purpose techniques, originally designed to solve capacitated network design problems, can be successfully integrated into general-purpose mixed integer programming (MIP) solvers. Our approach relies on an automatic detection of network structure within the constraint matrix of general mixed integer programs. More precisely, we identify multi-commodity (MCF) network sub-matrices and resolve the isomorphisms of the commodity blocks as well as the original graph structure. In the subsequent separation framework, we guide the constraint aggregation of available cutting plane procedures (e.g., based on mixed integer rounding) to produce strong cutting planes that reflect the structure of the constructed network.
The new MCF-separator integrates network design specific methodology into general optimization tools which is of particular importance for practitioners that tend to use MIP solvers as black boxes. Extensive computational tests show that our network detection procedure operates accurately and reliably. Moreover, due to the generated cutting planes, we achieve an average speed-up of a factor of two for pure network design problems with general MIP solvers. Many of these instances can only be solved to optimality in reasonable time if the new MCF-separator is active. In 9 \% of the instances of general MIP test sets we find consistent embedded networks and generate violated inequalities. In this case the computation time decreases by 18 \% on average with almost no degradation for unaffected instances. Second, we generalize concepts, models, and cutting planes from deterministic network design to robust network design, incorporating the uncertainty of traffic demands. We enhance and compare strategies that are able to handle a polyhedral set of different traffic scenarios. In particular, we consider two correlated solution methods, based on separating extreme demand scenarios and dualizing the linear description of the demand polytope, respectively. We consider robust network design as two-stage robust optimization with recourse. First stage capacity decisions are fixed for all scenarios while the second stage flow depends on the realized demands. In order to reroute the traffic as a function of the demand dynamics, we consider three alternative recourse actions, namely, static, affine, and dynamic routing. We analyze properties of the new affine routing and show that it combines advantages of the well-known static and dynamic models. Using the concept of robust cut-set polyhedra and the corresponding lifting theorems, we develop several classes of facet-defining inequalities based on network cuts that can be used to further accelerate solution strategies for robust network design. Among them are the well-known (flow) cut-set inequalities, which we generalize to general demand polytopes, but also new classes of potential cutting planes, so-called envelope inequalities. The practical importance of the developed cutting planes is revealed by a series of computational tests. Similar to the results for the MCF-separator we achieve speed-ups of two and more using the generalized classes of strong inequalities. To evaluate the robustness of solutions that are computed with our framework we use real-life measurements of traffic dynamics from different existing telecommunication networks, among them data from the German and the European research network. Our results indicate that traffic peaks do not necessarily occur all simultaneously with respect to different source-destination pairs, which is of practical importance for the design of uncertainty sets. It is, in particular, not necessary to dimension networks for a scenario that assumes all source-destination traffic is at its peak simultaneously. 
With our solutions we save up to 20 \% of the corresponding solution cost compared to this artificial scenario and achieve comparable levels of robustness.}, language = {en} } @phdthesis{Goetschel, author = {G{\"o}tschel, Sebastian}, title = {Adaptive Lossy Trajectory Compression for Optimal Control of Parabolic PDEs}, abstract = {Optimal control problems governed by nonlinear, time-dependent PDEs on three-dimensional spatial domains are an important tool in many fields, ranging from engineering applications to medicine. For the solution of such optimization problems, methods working on the reduced objective functional are often employed to avoid a full spatio-temporal discretization of the problem. The evaluation of the reduced gradient requires one solve of the state equation forward in time, and one backward solve of the adjoint equation. The state enters into the adjoint equation, requiring the storage of a full 4D data set. If Newton-CG methods are used, two additional trajectories have to be stored. To get numerical results that are accurate enough, in many cases very fine discretizations in time and space are necessary, leading to a significant amount of data to be stored and transmitted to mass storage. This thesis deals with the development and analysis of methods for lossy compression of such finite element solutions. The algorithms are based on a change of basis to reduce correlations in the data, combined with quantization. This is achieved by transforming the finite element coefficient vector from the nodal to the hierarchical basis, followed by rounding the coefficients to a prescribed precision. Due to the inexact reconstruction, and thus inexact data for the adjoint equation, the error induced in the reduced gradient, and reduced Hessian, has to be controlled, to not impede convergence of the optimization. Accuracy requirements of different optimization methods are analyzed, and computable error estimates for the influence of lossy trajectory storage are derived. These tools are used to adaptively control the accuracy of the compressed data. The efficiency of the algorithms is demonstrated on several numerical examples, ranging from a simple linear, scalar equation to a semi-linear system of reaction-diffusion equations. In all examples considerable reductions in storage space and bandwidth requirements are achieved, without significantly influencing the convergence behavior of the optimization methods. Finally, to go beyond pointwise error control, the hierarchical basis transform can be replaced by more sophisticated wavelet transforms. Numerical experiments indicate that choosing suitable norms for error control allows higher compression factors.}, language = {en} } @phdthesis{Dovica, author = {Dovica, Ivan}, title = {Robust tail assignment}, abstract = {The first part of this thesis is devoted to the general problem of stochastic shortest path problem. It is about searching for the shortest path in a graph in which arc lengths are uncertain and specified by continuous random variables. This problem is at the core of various applications, especially in robust transportation planning where paths correspond to aircraft, train, or bus rotations, crew duties or rosters, etc. We propose a novel solution method based on a discretisation of random variables which is applicable to any class of continuous random variables. We also give bounds on the approximation error of the discretised path lengths compared to the continuous path lengths. 
In addition, we provide theoretical results for the computational complexity of this method. In the second part, we apply this method to a real-world airline transportation problem: the so-called tail assignment problem. The goal of the tail assignment problem is to construct aircraft rotations, routes consisting of flight segments, for a set of individual aircraft in order to cover a set of flight segments (legs) while considering operational constraints of each individual aircraft as well as short- to long-term individual maintenance requirements. We state a stochastic programming formulation of this problem and we show how to solve it efficiently by using our method within a column generation framework. We show the gain of our stochastic approach in comparison to standard KPIs in terms of less propagated delay and thus lower operational costs, without an increase in computational complexity. A key point of our complex approach to the robust optimisation problem is the fit of the underlying stochastic model with reality. We propose a delay propagation model that is realistic, not overfitted, and can therefore be used for forecasting purposes. We benchmark our results using extensive simulation. We show a significant decrease of arrival delays and thus monetary savings on average as well as in the majority of our disruption scenarios. We confirm these benefits in even more life-like benchmarks, such as simulations where recovery actions are taken, and in scenarios which use historical delays directly instead of the stochastic model.}, language = {en} } @phdthesis{Humpola2014, author = {Humpola, Jesco}, title = {Gas Network Optimization by MINLP}, school = {Technische Universit{\"a}t Berlin}, year = {2014}, abstract = {One quarter of Europe's energy demand is provided by natural gas distributed through a vast pipeline network covering the whole of Europe. At a cost of 1 million Euros per kilometer, the extension of the European pipeline network is already a multi-billion Euro business. The challenging question is how to expand and operate the network in order to facilitate the transportation of specified gas quantities at minimum cost. This task can be formulated as a mathematical optimization problem that leads to real-world instances of enormous size and complexity. The aim of this thesis is the development of novel theory and optimization algorithms which make it possible to solve these problems. Gas network topology optimization problems can be modeled as nonlinear mixed-integer programs (MINLPs). Such an MINLP gives rise to a so-called active transmission problem (ATP), a continuous nonlinear non-convex feasibility problem which emerges from the MINLP model by fixing all integral variables. The key to solving the ATP as well as the overall gas network topology optimization problem, and the main contribution of this thesis, is a novel domain relaxation of the variable bounds and constraints in combination with a penalization in the objective function. In case the domain relaxation does not yield a primal feasible solution for the ATP, we offer novel sufficient conditions for proving the infeasibility of the ATP. These conditions can be expressed in the form of an MILP, i.e., the infeasibility of a non-convex NLP can be certified by solving an MILP. These results provide an efficient bounding procedure in a branch-and-bound algorithm. If the gas network consists only of pipes and valves, the ATP turns into a passive transmission problem (PTP).
Although its constraints are non-convex, its domain relaxation can be proven to be convex. Consequently, the feasibility of the PTP can be checked directly in an efficient way. Another advantage of the passive case is that the solution of the domain relaxation gives rise to a cutting plane for the overall topology optimization problem that expresses the infeasibility of the PTP. This cut is obtained by a Benders argument from the Lagrange function of the domain relaxation augmented by a specially tailored pc-regularization. These cuts provide tight lower bounds for the passive gas network topology optimization problem. The domain relaxation does not only provide certificates of infeasibility and cutting planes; it can also be used to construct feasible primal solutions. We make use of parametric sensitivity analysis in order to identify binary variables to be switched based on dual information. This approach makes it possible, for the first time, to directly compute MINLP solutions for large-scale gas network topology optimization problems. All the research in this thesis has been realized within the collaborative research project "Forschungskooperation Netzoptimierung (ForNe)". The developed software is in use by the cooperation partner Open Grid Europe GmbH. Parts of this thesis have been published in book chapters, journal articles, and technical reports. An overview of the topics and solution approaches within the research project is given by Martin et al. (2011) and F{\"u}genschuh et al. (2013). Gas network operation approaches and solution methods are described in detail by Pfetsch et al. (2014), and with a special focus on topology optimization in F{\"u}genschuh et al. (2011). The primal heuristic presented in this thesis is published by Humpola et al. (2014b). The method for pruning nodes of the branch-and-bound tree for an approximation of the original problem is described in F{\"u}genschuh and Humpola (2013) and Humpola et al. (2014a). The Benders-like inequality is introduced by Humpola and F{\"u}genschuh (2013).}, language = {en} } @phdthesis{Kuhn, author = {Kuhn, Alexander}, title = {Lagrangian Methods for Visualization and Analysis of Time-dependent Vector Fields}, url = {http://nbn-resolving.de/urn:nbn:de:gbv:ma9:1-4177}, pages = {179}, abstract = {Time-dependent vector fields are of high relevance to describe a wide range of physical phenomena based on particle motion, including stall effects in technical engineering, blood flow anomalies, and atmospheric mass transport. The efficient analysis and fundamental understanding of intrinsic field properties can lead to significant improvements when interacting with such phenomena based on the available field data. So-called Lagrangian methods are a particularly established technique for this purpose. They are based on the evaluation of time-dependent particle trajectories. This work presents an overview of the state of the art in time-dependent vector field analysis and visualization, with special focus on topology-oriented and Lagrangian methods. The first core aspect of this work is the introduction of a novel concept for a more objective and qualitative benchmark of existing Lagrangian approaches. Based on this benchmark, this work contributes and evaluates a set of novel concepts that offer new perspectives on established approaches with respect to computational handling, quality, and efficiency.
The second core aspect is the empirical application and validation of Lagrangian methods with respect to practically relevant analysis scenarios. Each application case includes an introduction to the underlying problem statement and an efficient solution using Lagrangian methods. The validation focuses on the comparison with existing flow analysis concepts and aspects of the quantitative evaluation in each case. Together, both topics of this work contribute not only to a more consistent formalization, but also to the improved applicability of Lagrangian methods.}, language = {en} } @phdthesis{Zoeckler, author = {Z{\"o}ckler, Maja}, title = {Modellgebundene Cranioplastie - Operationstechnik zur Umformung fr{\"u}hkindlicher Sch{\"a}deldeformit{\"a}ten unter Verwendung dreidimensionaler Standardformmodelle aus MRT-basierten Rekonstruktionen nicht deformierter Kinder}, language = {de} } @phdthesis{Kubiack, author = {Kubiack, Kim}, title = {Erstellung eines statistischen Formmodells des Mittelgesichts zur Planung individueller Templates f{\"u}r die navigationsgest{\"u}tzte Rekonstruktion des Mittelgesichtssch{\"a}dels}, language = {de} } @phdthesis{Hildebrandt, author = {Hildebrandt, Thomas}, title = {Das Konzept der Rhinorespiratorischen Hom{\"o}ostase - ein neuer theoretischer Ansatz f{\"u}r die Diskussion physiologischer und physikalischer Zusammenh{\"a}nge bei der Nasenatmung}, language = {de} } @phdthesis{Tycowicz, author = {Tycowicz, Christoph von}, title = {Concepts and Algorithms for the Deformation, Analysis, and Compression of Digital Shapes}, doi = {10.17169/refubium-4697}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000096721-7}, abstract = {This thesis concerns model reduction techniques for the efficient numerical treatment of physical systems governing the deformation behavior of geometrically complex shapes. We present new strategies for the construction of simplified, low-dimensional models that capture the main features of the original complex system and are suitable for use in interactive computer graphics applications. To demonstrate the effectiveness of the new techniques we propose frameworks for real-time simulation and interactive deformation-based modeling of elastic solids and shells and compare them to alternative approaches. In addition, we investigate differential operators that are derived from the physical models and hence can serve as alternatives to the Laplace-Beltrami operator for applications in modal shape analysis. Furthermore, this thesis addresses the compression of digital shapes. In particular, we present a lossless compression scheme that is adapted to the special characteristics of adaptively refined, hierarchical meshes.}, language = {en} } @phdthesis{Berthold, author = {Berthold, Timo}, title = {Heuristic algorithms in global MINLP solvers}, publisher = {Dr. Hut Verlag}, isbn = {978-3-8439-1931-9}, pages = {366}, abstract = {In the literature on mixed integer programming, heuristic algorithms (particularly primal heuristics) are often considered as stand-alone procedures; in that context, heuristics are treated as an alternative to solving a problem to proven optimality. This conceals the fact that heuristic algorithms are a fundamental component of state-of-the-art global solvers for mixed integer linear programming (MIP) and mixed integer nonlinear programming (MINLP).
In the present thesis, we focus on this latter aspect; we study heuristic algorithms that are tightly integrated within global MINLP solvers and analyze their impact on the overall solution process. Our contributions comprise generalizations of primal heuristics for MIP towards MINLP as well as novel ideas for MINLP primal heuristics and for heuristic algorithms to take branching decisions and to collect global information in MIP. These are: - Shift-and-Propagate, a novel propagation heuristic for MIP that does not require the solution of an LP relaxation, - a generic way to generalize large neighborhood search (LNS) heuristics from MIP to MINLP, - an Objective Feasibility Pump heuristic for nonconvex MINLP that uses second-order information and a dynamic selection of rounding procedures, - RENS, an LNS start heuristic for MINLP that optimizes over the set of feasible roundings of an LP solution, - Undercover, an LNS start heuristic for MINLP that solves a largest sub-MIP of a given MINLP, - Rapid Learning, a heuristic algorithm to generate globally valid conflict constraints for MIPs, - Cloud Branching, a heuristic algorithm that exploits dual degeneracy to reduce the number of candidates for branching variable selection. Additionally, we propose a new performance measure, the primal integral, that captures the benefits of primal heuristics better than traditional methods. In our computational study, we compare the performance of the MIP and MINLP solver SCIP with and without primal heuristics on six test sets with altogether 983 instances from academic and industrial sources, including our project partners ForNe, SAP, and Siemens. We observe that heuristics improve the solver performance regarding all measures that we used - by different orders of magnitude. We further see that the harder a problem is to solve to global optimality, the more important the deployment of primal heuristics becomes. The algorithms presented in this thesis are available in source code as part of the solver SCIP, of which the author has been a main developer for the past several years. Methods described in this thesis have also been re-implemented within several commercial and noncommercial MIP and MINLP software packages, including Bonmin, CBC, Cplex, Gams, Sulum, and Xpress.}, language = {en} } @phdthesis{Barth, author = {Barth, Carlo}, title = {Analysis of photonic crystals for interaction with near-surface emitters}, doi = {10.14279/depositonce-6880}, language = {en} } @phdthesis{Reuther, author = {Reuther, Markus}, title = {Mathematical Optimization of Rolling Stock Rotations}, abstract = {We show how to optimize rolling stock rotations that are required for the operation of a passenger timetable. The underlying mathematical optimization problem is called the rolling stock rotation problem (RSRP) and the leitmotiv of the thesis is RotOR, i.e., a highly integrated optimization algorithm for the RSRP. RotOR is used by DB Fernverkehr AG (DBF) in order to optimize intercity express (ICE) rotations for the European high-speed network. In this application, RSRPs have to be solved which (A) require many different aspects to be simultaneously considered, (B) are typically of large scale, and (C) include constraints that have a difficult combinatorial structure. This thesis suggests answers to these issues via the following concepts. (A) The main model, which RotOR uses, relies on a hypergraph. The hypergraph provides an easy way to model manifold industrial railway requirements in great detail.
This includes well-known vehicle composition requirements as well as relatively unexplored regularity stipulations. At the same time, the hypergraph directly leads to a mixed-integer programming (MIP) model for the RSRP. (B) The main algorithmic ingredient to solve industrial instances of the RSRP is a coarse-to-fine (C2F) column generation procedure. In this approach, the hypergraph is layered into coarse and fine layers that distinguish different levels of detail of the RSRP. The coarse layers are algorithmically utilized while pricing fine columns until proven optimality. Initially, the C2F approach is presented in terms of pure linear programming in order to provide an interface for other applications. (C) Rolling stock rotations have to comply with resource constraints in order to ensure, e.g., enough maintenance inspections along the rotations. These constraints are computationally hard, but are well known in the literature on the vehicle routing problem (VRP). We define an interface problem in order to bridge between the RSRP and the VRP and derive a straightforward algorithmic concept, namely regional search (RS), from their common features as well as their differences. Our RS algorithms show promising results for classical VRPs and RSRPs. In the first part of the thesis we present these concepts, which encompass its main mathematical contribution. The second part explains all modeling and solving components of RotOR that turn out to be essential in its industrial application. The thesis concludes with a solution to a complex re-optimization RSRP that RotOR has computed successfully for DBF. In this application all ICE vehicles of the ICE-W fleets of DBF had to be redirected past a construction site on a high-speed line in the heart of Germany.}, language = {en} } @phdthesis{Beckenbach2019, author = {Beckenbach, Isabel}, title = {Matchings and Flows in Hypergraphs}, year = {2019}, abstract = {In this dissertation, we study matchings and flows in hypergraphs using combinatorial methods. These two problems are among the best studied in the field of combinatorial optimization. As hypergraphs are a very general concept, not many results on graphs can be generalized to arbitrary hypergraphs. Therefore, we consider special classes of hypergraphs, which admit more structure, to transfer results from graph theory to hypergraph theory. In Chapter 2, we investigate the perfect matching problem on different classes of hypergraphs generalizing bipartite graphs. First, we give a polynomial time approximation algorithm for the maximum weight matching problem on so-called partitioned hypergraphs, whose approximation factor is best possible up to a constant. Afterwards, we look at the theorems of K{\"o}nig and Hall and their relation. Our main result is a condition for the existence of perfect matchings in normal hypergraphs that generalizes Hall's condition for bipartite graphs. In Chapter 3, we consider perfect f-matchings, f-factors, and (g,f)-matchings. We prove conditions for the existence of (g,f)-matchings in unimodular hypergraphs, perfect f-matchings in uniform Mengerian hypergraphs, and f-factors in uniform balanced hypergraphs. In addition, we give an overview of the complexity of the (g,f)-matching problem on different classes of hypergraphs generalizing bipartite graphs. In Chapter 4, we study the structure of hypergraphs that admit a perfect matching. We show that these hypergraphs can be decomposed along special cuts.
For graphs it is known that the resulting decomposition is unique, which does not hold for hypergraphs in general. However, we prove the uniqueness of this decomposition (up to parallel hyperedges) for uniform hypergraphs. In Chapter 5, we investigate flows on directed hypergraphs, where we focus on graph-based directed hypergraphs, which means that every hyperarc is the union of a set of pairwise disjoint ordinary arcs. We define a residual network, which can be used to decide whether a given flow is optimal or not. Our main result in this chapter is an algorithm that computes a minimum cost flow on a graph-based directed hypergraph. This algorithm is a generalization of the network simplex algorithm.}, language = {en} } @phdthesis{Mangalgiri, author = {Mangalgiri, Gauri}, title = {Development of Titanium Dioxide Metasurfaces and Nanosoupbowls for Optically Enhancing Silicon Photocathodes}, doi = {10.18452/20160}, language = {en} } @phdthesis{Kruber, author = {Kruber, Nico}, title = {Approximate Distributed Set Reconciliation with Defined Accuracy}, doi = {10.18452/21294}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:11-110-18452/22105-6}, language = {en} } @phdthesis{Ehlke2020, author = {Ehlke, Moritz}, title = {3D Reconstruction of Anatomical Structures from 2D X-ray Images}, doi = {https://doi.org/10.14279/depositonce-11553}, year = {2020}, language = {en} } @phdthesis{Santiago, author = {Santiago, Xavier Garcia}, title = {Numerical methods for shape optimization of photonic nanostructures}, organization = {Karlsruher Institut f{\"u}r Technologie}, doi = {10.5445/IR/1000131006}, language = {en} } @phdthesis{Venkatareddy, author = {Venkatareddy, Narendra Lagumaddepalli}, title = {Revealing secrets of mussel-glue mimetic peptides - From advanced NMR to computational process modelling}, language = {en} } @phdthesis{Eshtewy, author = {Eshtewy, Neveen Ali Salem}, title = {Mathematical Modeling of Metabolic-Genetic Networks}, language = {en} } @phdthesis{Reuter, author = {Reuter, Bernhard}, title = {Generalisierte Markov-Modellierung von Nichtgleichgewichtssystemen - Simulation und Modellierung der Amyloid-beta(1-40)-Konformationsdynamik unter Mikrowelleneinfluss}, language = {de} } @phdthesis{Quer, author = {Quer, Jannes}, title = {Importance Sampling for metastable dynamical systems in molecular dynamics}, language = {en} } @phdthesis{Klimm, author = {Klimm, Martina}, title = {New Strategies in Conformation Dynamics}, language = {en} } @phdthesis{Villatoro, author = {Villatoro, Jos{\´e}}, title = {A combined approach for the analysis of biomolecules using IR-MALDI ion mobility spectrometry and molecular dynamics simulations of peptide ions in the gas phase}, language = {en} } @phdthesis{Wohlfeil, author = {Wohlfeil, Benjamin}, title = {Integrated fiber grating couplers in silicon photonics}, doi = {10.14279/depositonce-4521}, language = {en} } @phdthesis{Omari, author = {Omari, Mohamed}, title = {A Mathematical Model of Bovine Metabolism and Reproduction: Application to Feeding Strategies, Drug Administration and Experimental Design}, language = {en} } @phdthesis{Reidelbach, author = {Reidelbach, Marco}, title = {Optimal Network Generation for the Simulation of Proton Transfer Processes}, language = {en} } @phdthesis{Pulaj, author = {Pulaj, Jonad}, title = {Cutting Planes for Union-Closed Families}, abstract = {Frankl's (union-closed sets) conjecture states that for any nonempty finite union-closed (UC) family of distinct sets there exists an element in at least half of the sets. 
Poonen's Theorem characterizes the existence of weights which determine whether a given UC family ensures Frankl's conjecture holds for all UC families which contain it. The weight systems are nontrivial to identify for a given UC family, and methods to determine such weight systems have led to several other open questions and conjectures regarding structures in UC families. We design a cutting-plane method that computes the explicit weights which imply the existence conditions of Poonen's Theorem, using computational integer programming coupled with redundant verification routines that ensure correctness. We find over one hundred previously unknown families of sets which ensure Frankl's conjecture holds for all families that contain any of them. This improves significantly on all previous results of the kind. Our framework allows us to answer several open questions and conjectures regarding structural properties of UC families, including proving the 3-sets conjecture of Morris from 2006, which characterizes the minimum number of 3-sets that ensure Frankl's conjecture holds for all families that contain them. Furthermore, our method provides a general algorithmic road-map for improving other known results and uncovering structures in UC families.}, language = {en} } @phdthesis{Pakhomov, author = {Pakhomov, Anton}, title = {Efficient modeling and optimization of surface second-harmonic generation from nanophotonic components}, doi = {10.22032/dbt.49369}, language = {en} } @phdthesis{Moldenhauer, author = {Moldenhauer, Marian}, title = {Adaptive Algorithms in Optimization under PDE Constraints}, pages = {106}, abstract = {In this thesis, adaptive algorithms in optimization under PDE constraints have been investigated. In its application, the aim of optimization is to increase the longevity of implants, namely the hip joint implant, and in doing so to minimize stress shielding and simultaneously minimize the influence of locally high stresses that, above a threshold value, are harmful to the bone structure. Under the constraint of the equilibrium of forces, describing an elastodynamic setup, coupled with a contact inequality condition, a computationally expensive problem formulation is given. The first step to make the solution of the given problem possible and efficient was to change over to the spatial equilibrium equation, thus rendering an elastostatic setup. Subsequently the intrinsically dynamic motions - trajectories in the load domain - were converted to the static setup. Thus, the trajectories are marginalized to the load domain and characterized with probability distributions. Therefore the solving of the PDE constraint, the contact problem, is simplified. Yet in the whole optimization process, solving the PDE, i.e., the spatial equilibrium equation together with the contact condition, still accounts for the most expensive contribution and hence needed further reduction. This was achieved by application of Kriging interpolation to the load responses of the integrated distribution of stress difference and the maximum stresses. The interpolation of the two response surfaces only needs comparatively few PDE solves to set up the models. Moreover, the Kriging models can be adaptively extended by sequentially adding sample-response pairs. For this, the Kriging-inherent variance is used to estimate ideal new sample locations with maximum variance values. In doing so, the overall interpolation variance and therefore the interpolation error are reduced.
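The variance-driven extension of the Kriging models described above can be illustrated by a small, purely generic Python sketch (the kernel, its length scale, and the stand-in response function are assumptions made only for this example and are not taken from the thesis):

import numpy as np

def rbf_kernel(a, b, length_scale=0.5):
    # Squared-exponential covariance between two sets of 1D sample locations.
    d = a[:, None] - b[None, :]
    return np.exp(-0.5 * (d / length_scale) ** 2)

def kriging_predict(x_train, y_train, x_query, noise=1e-8):
    # Simple zero-mean Gaussian process regression: predictive mean and variance.
    K = rbf_kernel(x_train, x_train) + noise * np.eye(len(x_train))
    K_s = rbf_kernel(x_query, x_train)
    K_inv = np.linalg.inv(K)
    mean = K_s @ K_inv @ y_train
    var = 1.0 - np.sum((K_s @ K_inv) * K_s, axis=1)
    return mean, np.maximum(var, 0.0)

def expensive_response(x):
    # Stand-in for an expensive PDE solve (purely hypothetical).
    return np.sin(3.0 * x) + 0.3 * x

x_train = np.array([0.0, 0.5, 1.0])
y_train = expensive_response(x_train)
candidates = np.linspace(0.0, 1.0, 201)
for _ in range(5):
    _, var = kriging_predict(x_train, y_train, candidates)
    x_new = candidates[np.argmax(var)]   # location of maximum predictive variance
    x_train = np.append(x_train, x_new)
    y_train = np.append(y_train, expensive_response(x_new))

In this toy setting the next sample is always placed where the surrogate's predictive variance peaks, which mirrors the sequential addition of sample-response pairs at maximum-variance locations described above.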
For the integration of the integrated stress differences and penalty values over the relatively high-dimensional load domain, Monte Carlo integration was implemented, averting the curse of dimensionality. Here, the motion's probability distribution, combined with patient-specific data on motion frequencies, is exploited, which renders the otherwise necessary importance sampling obsolete. Throughout the optimization, the FE-discretization error and the subsequently attached errors entering the solution process via PDE discretization and approximative solving of the PDE, Kriging interpolation and Monte Carlo integration need to decrease. While the FE-discretization error and the solution of the elastostatic contact problem were assumed precise enough, numerics showed that the interpolation and integration errors can be controlled by adaptive refinement of the respective methods. For this purpose, comparable error quantities for the particular algorithms were introduced and effectively put to use. For the implant position's optimization, the derivative of the objective function was derived using the implicit function theorem. As the FE-discretization changes once implant position modifications are large enough, a special line search had to be used to deal with the discontinuities in the objective function. The interplay and performance of the subalgorithms were demonstrated numerically on a reduced 2D setup of a hip joint with and without the implant. Consequently, the load domain and the control variable were also limited to the 2D case.}, language = {en} } @phdthesis{Baum2007, author = {Baum, Daniel}, title = {A Point-Based Algorithm for Multiple 3D Surface Alignment of Drug-Sized Molecules}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000002759-2}, school = {Freie Universit{\"a}t Berlin}, year = {2007}, abstract = {One crucial step in virtual drug design is the identification of new lead structures with respect to a pharmacological target molecule. The search for new lead structures is often done with the help of a pharmacophore, which carries the essential structural as well as physico-chemical properties that a molecule needs to have in order to bind to the target molecule. In the absence of the target molecule, such a pharmacophore can be established by comparison of a set of active compounds. In order to identify their common features, a multiple alignment of all or most of the active compounds is necessary. Moreover, since the "outer shape" of the molecules plays a major role in the interaction between drug and target, an alignment algorithm aiming at the identification of common binding properties needs to consider the molecule's "outer shape", which can be approximated by the solvent excluded surface. In this thesis, we present a new approach to molecular surface alignment based on a discrete representation of shape as well as physico-chemical properties by points distributed on the solvent excluded surface. We propose a new method to distribute points regularly on a surface w.r.t. a smoothly varying point density given on that surface. Since the point distribution algorithm is not restricted to molecular surfaces, it might also be of interest for other applications. For the computation of pairwise surface alignments, we extend an existing point matching scheme to surface points, and we develop an efficient data structure speeding up the computation by a factor of three.
Moreover, we present an approach to compute multiple alignments from pairwise alignments, which is able to handle a large number of surface points. All algorithms are evaluated on two sets of molecules: eight thermolysin inhibitors and seven HIV-1 protease inhibitors. Finally, we compare the results obtained from surface alignment with the results obtained by applying an atom alignment approach.}, language = {en} } @phdthesis{Prohaska, author = {Prohaska, Steffen}, title = {Skeleton-based visualization of massive voxel objects with network-like architecture}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:517-opus-14888}, abstract = {This work introduces novel internal and external memory algorithms for computing voxel skeletons of massive voxel objects with complex network-like architecture and for converting these voxel skeletons to piecewise linear geometry, that is triangle meshes and piecewise straight lines. The presented techniques help to tackle the challenge of visualizing and analyzing 3d images of increasing size and complexity, which are becoming more and more important in, for example, biological and medical research. Section 2.3.1 contributes to the theoretical foundations of thinning algorithms with a discussion of homotopic thinning in the grid cell model. The grid cell model explicitly represents a cell complex built of faces, edges, and vertices shared between voxels. A characterization of pairs of cells to be deleted is much simpler than characterizations of simple voxels were before. The grid cell model resolves topologically unclear voxel configurations at junctions and locked voxel configurations causing, for example, interior voxels in sets of non-simple voxels. A general conclusion is that the grid cell model is superior to indecomposable voxels for algorithms that need detailed control of topology. Section 2.3.2 introduces a noise-insensitive measure based on the geodesic distance along the boundary to compute two-dimensional skeletons. The measure is able to retain thin object structures if they are geometrically important while ignoring noise on the object's boundary. This combination of properties is not known of other measures. The measure is also used to guide erosion in a thinning process from the boundary towards lines centered within plate-like structures. Geodesic distance based quantities seem to be well suited to robustly identify one- and two-dimensional skeletons. Chapter 6 applies the method to visualization of bone micro-architecture. Chapter 3 describes a novel geometry generation scheme for representing voxel skeletons, which retracts voxel skeletons to piecewise linear geometry per dual cube. The generated triangle meshes and graphs provide a link to geometry processing and efficient rendering of voxel skeletons. The scheme creates non-closed surfaces with boundaries, which contain fewer triangles than a representation of voxel skeletons using closed surfaces like small cubes or iso-surfaces. A conclusion is that thinking specifically about voxel skeleton configurations instead of generic voxel configurations helps to deal with the topological implications. The geometry generation is one foundation of the applications presented in Chapter 6. Chapter 5 presents a novel external memory algorithm for distance ordered homotopic thinning. The presented method extends known algorithms for computing chamfer distance transformations and thinning to execute I/O-efficiently when input is larger than the available main memory. 
The applied block-wise decomposition schemes are quite simple. Yet it was necessary to carefully analyze the effects of block boundaries to devise globally correct external memory variants of known algorithms. In general, doing so is superior to naive block-wise processing ignoring boundary effects. Chapter 6 applies the algorithms in a novel method based on confocal microscopy for the quantitative study of micro-vascular networks in the field of microcirculation.}, language = {en} } @phdthesis{Schlechte, author = {Schlechte, Thomas}, title = {Railway Track Allocation: Models and Algorithms}, publisher = {S{\"u}dwestdeutscher Verlag f{\"u}r Hochschulschriften}, address = {Saarbr{\"u}cken, Germany}, isbn = {978-3-8381-3222-8}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-34272}, school = {Technische Universit{\"a}t Berlin}, pages = {239}, abstract = {This thesis is about mathematical optimization for the efficient use of railway infrastructure. We address the optimal allocation of the available railway track capacity - the track allocation problem. This track allocation problem is a major challenge for a railway company, independent of whether a free market, a private monopoly, or a public monopoly is given. Planning and operating railway transportation systems is extremely hard due to the combinatorial complexity of the underlying discrete optimization problems, the technical intricacies, and the immense sizes of the problem instances. Mathematical models and optimization techniques can result in huge gains for both railway customers and operators, e.g., in terms of cost reductions or service quality improvements. We tackle this challenge by developing novel mathematical models and associated innovative algorithmic solution methods for large-scale instances. This allows us to produce, for the first time, reliable solutions for a real-world instance, i.e., the Simplon corridor in Switzerland. The opening chapter gives a comprehensive overview of railway planning problems. This provides insights into the regulatory and technical framework, it discusses the interaction of several planning steps, and identifies optimization potentials in railway transportation. The remainder of the thesis is comprised of two major parts. The first part is concerned with modeling railway systems to allow for resource and capacity analysis. Railway capacity has basically two dimensions: a space dimension, given by the physical infrastructure elements, and a time dimension, which refers to the train movements, i.e., occupation or blocking times, on the physical infrastructure. Railway safety systems operate on the same principle all over the world. A train has to reserve infrastructure blocks for some time to pass through. Two trains reserving the same block of the infrastructure at the same point in time constitute a block conflict. Therefore, models for railway capacity involve the definition and calculation of reasonable running and associated reservation and blocking times to allow for a conflict-free allocation. In the second and main part of the thesis, the optimal track allocation problem for macroscopic models of the railway system is considered. The literature on related problems is surveyed. A graph-theoretic model for the track allocation problem is developed. In that model optimal track allocations correspond to conflict-free paths in special time-expanded graphs.
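A generic flavor of such a path-based packing model on a time-expanded graph, not necessarily the exact formulation of the thesis (the symbols T, P_t, C, w_p, and x_p are illustrative assumptions), reads
\[
\max \sum_{t \in T} \sum_{p \in P_t} w_p\, x_p
\quad \text{s.t.} \quad
\sum_{p \in P_t} x_p \le 1 \;\; \forall t \in T, \qquad
\sum_{p \ni r} x_p \le 1 \;\; \forall r \in C, \qquad
x_p \in \{0,1\},
\]
where T is the set of train requests, P_t the set of feasible paths for request t in the time-expanded graph, C the set of infrastructure resources whose blocking times may conflict, and w_p the value of scheduling path p.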
Furthermore, we make considerable progress on solving track allocation problems through two main contributions: a novel modeling approach for the macroscopic track allocation problem and algorithmic improvements based on the utilization of the bundle method. Finally, we go back to practice and present in the last chapter several case studies using the tools netcast and tsopt. We provide a computational comparison of our new models and standard packing models used in the literature. Our computational experience indicates that our approach, i.e., ``configuration models'', outperforms other models. Moreover, the rapid branching heuristic and the bundle method enable us to produce high-quality solutions for very large-scale instances, which has not been possible before. In addition, we present results for a theoretical and rather visionary auction framework for track allocation. We discuss several auction design questions and analyze experiments from various auction simulations. The highlights are results for the Simplon corridor in Switzerland. We optimized the train traffic through this tunnel using our models and software tools. To the best of the author's knowledge, and as confirmed by several railway practitioners, this was the first time that fully automatically produced track allocations on a macroscopic scale fulfill the requirements of the originating microscopic model, withstand the evaluation in the microscopic simulation tool OpenTrack, and exploit the infrastructure capacity. This documents the success of our approach in practice and the usefulness and applicability of mathematical optimization to railway track allocation.}, language = {en} } @phdthesis{Lamecker, author = {Lamecker, Hans}, title = {Variational and statistical shape modeling for 3D geometry reconstruction}, abstract = {The reconstruction of geometric shapes plays an important role in many biomedical applications. One example is the patient-specific, computer-aided planning of complex interventions, which requires the generation of explicitly represented geometric models of anatomical structures from medical image data. Only solutions that require minimal interaction by medical personnel are likely to enter clinical routine. Another example is the planning of surgical corrections of deformities where the target shape is unknown. Surgeons are often forced to resort to subjective criteria. These applications still pose highly challenging reconstruction problems, which are addressed in this thesis. The fundamental hypothesis, pursued in this thesis, is that the problems can be solved by incorporating a-priori knowledge about shape and other application-specific characteristics. Here, we focus mainly on the aspect of geometric shape analysis. The basic idea is to capture the most essential variations of a certain class of geometric objects via statistical shape models, which model typical features contained in a given population, and to restrict the outcome of a reconstruction algorithm (more or less) to the space spanned by such models. A fundamental prerequisite for performing statistical shape analysis on a set of different objects is the identification of corresponding points on their associated surfaces. This problem is particularly difficult to solve if the shapes stem from different individuals. The reason lies in the basic difficulty of defining suitable measures of similarity. In this thesis, we divide the correspondence problem into feature and non-feature matching.
The feature part depends on the application, while the non-feature part can be characterized by a purely geometric description. We propose two different approaches. The first approach has proved useful in many applications. Yet, it suffers from some practical limitations and does not yield a measure of similarity. Our second, variational, approach is designed to overcome these limitations. In it, we propose to minimize an invariant stretching measure, constrained by previously computed features. An important property, which sets our method apart from previous work, is that it does not require the computation of a global surface parameterization.}, language = {en} } @phdthesis{Durmaz, author = {Durmaz, Vedat}, title = {Atomistic Binding Free Energy Estimations for Biological Host-Guest Systems}, publisher = {FU Dissertationen Online}, pages = {216}, abstract = {Accurate quantifications of protein-ligand binding affinities by means of in silico methods increasingly gain importance in various scientific branches including toxicology and pharmacology. In silico techniques are not only generally less demanding than laboratory experiments in terms of time and cost, in particular if binding assays or synthesis protocols need to be developed in advance; at times, they also provide the only access to risk assessments on novel chemical compounds arising from biotic or abiotic degradation of anthropogenic substances. However, despite the continuous technological and algorithmic progress over the past decades, binding free energy estimations through molecular dynamics simulations still pose an enormous computational challenge owing to the mathematical complexity of solvated macromolecular systems often consisting of hundreds of thousands of atoms. The goals of this thesis can roughly be divided into two categories dealing with different aspects of host-guest binding quantification. On the one side, algorithmic strategies for a comprehensive exploration and decomposition of conformational space, in conjunction with an automated selection of representative molecular geometries and binding poses, have been elaborated, providing initial structures for free energy calculations. In light of the dreaded trapping problem typically associated with molecular dynamics simulations, the focus was laid on a particularly systematic generation of representatives covering a broad range of physically accessible molecular conformations and interaction modes. On the other side, building on these input geometries, binding affinity models based on the linear interaction energy (LIE) method have been developed for a couple of (bio)molecular systems. The applications included a successful prediction of the liquid-chromatographic elution order as well as retention times of highly similar hexabromocyclododecane (HBCD) stereoisomers, a novel empirical LIE-QSAR hybrid binding affinity model related to the human estrogen receptor α (ERα), and, finally, the (eco)toxicological prioritization of transformation products originating from the antibiotic sulfamethoxazole with respect to their binding affinities to the bacterial enzyme dihydropteroate synthase. Altogether, a fully automated approach to binding mode and affinity estimation has been presented that requires only an arbitrary geometry of the small molecule under observation and a spatial vector specifying the binding site of a potential target molecule.
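For orientation, the standard LIE ansatz (not necessarily the exact parameterization used in the thesis; the coefficients and energy terms below are the textbook ones) estimates the binding free energy from ensemble-averaged ligand-surrounding interaction energies of the bound and the free state,
\[
\Delta G_{\mathrm{bind}} \approx \alpha \,\Delta\langle V^{\mathrm{vdW}}\rangle + \beta \,\Delta\langle V^{\mathrm{el}}\rangle + \gamma ,
\qquad
\Delta\langle V \rangle = \langle V \rangle_{\mathrm{bound}} - \langle V \rangle_{\mathrm{free}} ,
\]
where the averages of the van der Waals and electrostatic ligand-surrounding energies are taken from simulations of the complex and of the free ligand in solution, and \alpha, \beta, \gamma are empirically fitted coefficients.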
According to our studies, this automated approach is superior to conventional docking and thermodynamic average methods and primarily suggests computing binding free energies on the basis of several strongly distinct complex geometries. Both chromatographic retention times of HBCD and binding affinities to ERα yielded squared coefficients of correlation with experimental results significantly higher than 0.8. Approximately 85 \% (100 \%) of predicted receptor-ligand binding modes deviated less than 1.53 {\AA} (2.05 {\AA}) from available crystallographic structures.}, language = {en} } @phdthesis{MoualeuNgangue, author = {Moualeu-Ngangue, Dany Pascal}, title = {A Mathematical Tuberculosis Model in Cameroon}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000095327-7}, school = {Freie Universit{\"a}t Berlin}, pages = {154}, abstract = {This thesis firstly presents a nonlinear extended deterministic model for the transmission dynamics of tuberculosis, based on realistic assumptions and data collected from the WHO. This model enables a comprehensive qualitative analysis of various aspects in the outbreak and control of tuberculosis in Sub-Saharan African countries and successfully reproduces the epidemiology of tuberculosis in Cameroon for the past (1994-2010). Some particular properties of the model and its solution have been presented using the comparison theorem applied to the theory of differential equations. The existence and the stability of a disease-free equilibrium have been discussed using the Perron-Frobenius theorem and Metzler stable matrices. Furthermore, we computed the basic reproduction number, i.e. the number of cases that one case generates on average over the course of its infectious period. Rigorous qualitative analysis of the model reveals that, in contrast to the model without reinfections, the full model with reinfection exhibits the phenomenon of backward bifurcation, where a stable disease-free equilibrium coexists with a stable endemic equilibrium when a certain threshold quantity, known as the basic reproduction ratio (R0), is less than unity. The global stability of the disease-free equilibrium has been discussed using the concepts of Lyapunov stability and bifurcation theory. With the help of a sensitivity analysis using data from Cameroon, we identified the relevant parameters which play a key role in the transmission and the control of the disease. This was possible by applying sophisticated numerical methods (POEM) developed at ZIB. Using advanced approaches for optimal control that consider the costs for chemoprophylaxis, treatment and educational campaigns should provide a framework for designing realistic, cost-effective strategies with different intervention methods. The forward-backward sweep method has been used to solve the numerical optimal control problem. The numerical result of the optimal control problem reveals that a combined effort in education and chemoprophylaxis may lead to a reduction of 80\% in the number of infected people in 10 years.
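As a generic illustration of the basic reproduction number mentioned above (the formulas below are textbook relations, not the specific model of the thesis), R0 is commonly computed as the spectral radius of the next-generation matrix,
\[
R_0 = \rho\left(F V^{-1}\right),
\]
where F collects the rates at which new infections arise and V the transition rates between infected compartments, both linearized at the disease-free equilibrium; for the classical SIR model with transmission rate \beta and recovery rate \gamma this reduces to R_0 = \beta/\gamma.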
The mathematical and numerical approaches developed in this thesis could be similarly applied in many other Sub-Saharan countries where TB is a public health problem.}, language = {en} } @phdthesis{Achterberg, author = {Achterberg, Tobias}, title = {Constraint Integer Programming}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:83-opus-16117}, abstract = {This thesis introduces the novel paradigm of constraint integer programming (CIP), which integrates constraint programming (CP) and mixed integer programming (MIP) modeling and solving techniques. It is supplemented by the software SCIP, which is a solver and framework for constraint integer programming that also features SAT solving techniques. SCIP is freely available in source code for academic and non-commercial purposes. Our constraint integer programming approach is a generalization of MIP that allows for the inclusion of arbitrary constraints, as long as they turn into linear constraints on the continuous variables after all integer variables have been fixed. The constraints, may they be linear or more complex, are treated by any combination of CP and MIP techniques: the propagation of the domains by constraint specific algorithms, the generation of a linear relaxation and its solving by LP methods, and the strengthening of the LP by cutting plane separation. The current version of SCIP comes with all of the necessary components to solve mixed integer programs. In the thesis, we cover most of these ingredients and present extensive computational results to compare different variants for the individual building blocks of a MIP solver. We focus on the algorithms and their impact on the overall performance of the solver. In addition to mixed integer programming, the thesis deals with chip design verification, which is an important topic of electronic design automation. Chip manufacturers have to make sure that the logic design of a circuit conforms to the specification of the chip. Otherwise, the chip would show an erroneous behavior that may cause failures in the device where it is employed. An important subproblem of chip design verification is the property checking problem, which is to verify whether a circuit satisfies a specified property. We show how this problem can be modeled as constraint integer program and provide a number of problem-specific algorithms that exploit the structure of the individual constraints and the circuit as a whole. Another set of extensive computational benchmarks compares our CIP approach to the current state-of-the-art SAT methodology and documents the success of our method.}, language = {en} } @phdthesis{Loebel, author = {L{\"o}bel, Andreas}, title = {Optimal Vehicle Scheduling in Public Transit}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-10169}, abstract = {This dissertation deals with the optimization of vehicle scheduling in public transit. For most practically relevant cases, this problem is hard (NP-hard). In this thesis we present integer linear programming methods for solving this planning problem. "Reasonable" mathematical formulations of the vehicle scheduling problem are based on network flow models and corresponding integer linear programs (LPs). These are so-called arc-oriented multicommodity flow models and path-oriented set partitioning models, respectively.
We consider both approaches, but the focus is on the arc-oriented multicommodity flow model. Mathematically, we treat these models with branch-and-cut and branch-and-cut-and-price methods. Real-world applications lead to huge LPs with several million integer variables. Handling such LPs requires column generation methods. Based on Lagrangian relaxations, we develop new methods for selecting the columns to be generated, which we call Lagrangian pricing. Lagrangian pricing techniques have made it possible for the first time to solve LPs of this kind with about 70 million variables. For the arc-oriented (multicommodity) flow approach, we describe in detail how the Lagrangian relaxations as well as the LP relaxation are solved efficiently. In addition, we propose a heuristic that quickly produces good solutions. This heuristic is based on a so-called schedule-first-cluster-second approach. A central task in solving these primal and dual problems is the efficient treatment of single-depot problems. We show that the arc-oriented multicommodity flow model can be transformed into a path-oriented set partitioning model by a suitable application of Dantzig-Wolfe decomposition. The second part of this thesis presents the computational results for the methods we developed and implemented. These investigations are based on real test data from three large German public transport companies. The implemented codes work reliably and stably. The test runs performed with these methods delivered excellent results: all instances except one could be solved to optimality. The solutions of the branch-and-cut method were also compared with the planning results of the methods currently used in practice: we could save several additional vehicles and demonstrate a cost reduction of up to 10 \%. The potential benefit of these methods is enormous. For example, the BVG expects to considerably streamline its planning process with the software tools we developed and to achieve annual savings of about 100 million Deutschmarks, see the article "Auf Sparkurs zum Ziel" in Rheinischer Merkur, No. 39, by Schmidt [1997]. Parts of the presented methods have already been integrated into the planning systems BERTA (of the Berliner Verkehrsbetriebe (BVG)) and MICROBUS II (of the IVU Gesellschaft f{\"u}r Informatik, Verkehrs- und Umweltplanung mbH, Berlin). Furthermore, the research department of SIEMENS AG in Munich has also acquired this system.}, language = {en} } @phdthesis{Stalling, author = {Stalling, Detlev}, title = {Fast Texture-Based Algorithms for Vector Field Visualization}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-3838}, number = {SC-98-40}, abstract = {In this thesis we develop new methods for visualizing vector fields which specifically address three design goals: accuracy, performance, and cognition. Our methods are general-purpose and can be applied to arbitrary vector fields in two- and three-dimensional space. The methodology behind our approach is {\em texture-based visualization}. Texture-based visualization methods imitate techniques known from experimental flow visualization, namely, the observation of randomly dispersed particles or dye injection patterns.
Instead of depicting individual lines or symbols, a contiguous high-resolution image or texture is generated. This texture clearly reveals the directional structure of the field. In this way intuitive insight can be obtained and even small details of the field become visible. However, it remains unclear what kind of textures are best suited for our purpose. How do we generate them? Can we apply these methods in three-dimensional space? In particular, we focus on a technique known as {\em line integral convolution} or LIC. This method turns out to be quite versatile and well-suited for visualizing many interesting vector fields. LIC images display the integral curves or field lines of a vector field at high spatial resolution. Although conceptually quite simple, line integral convolution implies a number of interesting mathematical and algorithmic questions.}, language = {en} } @phdthesis{Sahu, author = {Sahu, Manish}, title = {Vision-based Context-awareness in Minimally Invasive Surgical Video Streams}, pages = {103}, abstract = {Surgical interventions are becoming increasingly complex thanks to modern assistance systems (imaging, robotics, etc.). Minimally invasive surgery in particular places high demands on surgeons due to added surgical complexity and information overload. Therefore, there is a growing need of developing context-aware systems that recognize the current surgical situation in order to derive and present the relevant information to the surgical staff for assistance. Current approaches for deriving contextual cues either utilize specialized hardware that is disruptive to the surgical workflow, or utilize vision-based approaches that require valuable time of surgeons, especially for manual annotations. The main objective of this cumulative dissertation is to improve the existing approaches for three important sub-problems of vision-based context-aware systems, namely surgical phase recognition, surgical instrument recognition and surgical instrument segmentation, while tackling the vision and manual annotation challenges related to these problems. This dissertation demonstrates that vision-based approaches for the three named clinical sub-problems of context-aware systems can be developed in an annotation-scarce setting by employing: domain-specific, deep learning based transfer learning techniques for the surgical instrument and phase recognition tasks; and deep learning based simulation-to-real unsupervised domain adaptation techniques for the surgical instrument segmentation task. The efficacy and real-time performance of the developed approaches have been evaluated on publicly available datasets containing real surgical videos (laparoscopic procedures) that were acquired in an uncontrolled surgical environment. 
These proposed approaches advance the state of the art for the aforementioned research problems of context-aware systems in the OR and can potentially be utilized for real-time notification of the surgical phase, surgical instrument usage, and image-based localization of surgical instruments.}, language = {en} } @phdthesis{Ambellan, author = {Ambellan, Felix}, title = {Efficient Riemannian Statistical Shape Analysis with Applications in Disease Assessment}, doi = {10.17169/refubium-36729}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-refubium-37016-3}, abstract = {In this work, we address the challenge of developing statistical shape models that account for the non-Euclidean nature inherent to (anatomical) shape variation and at the same time offer fast, numerically robust processing and as much invariance as possible regarding translation and rotation, i.e. Euclidean motion. With this aim, we formulate a continuous and physically motivated notion of shape space based on deformation gradients. We follow two different tracks to endow this differential representation with a Riemannian structure and establish a statistical shape model. (1) We derive a model based on differential coordinates as elements in GL(3)+. To this end, we adapt the notion of bi-invariant means employing an affine connection structure on GL(3)+. Furthermore, we perform second-order statistics based on a family of Riemannian metrics providing the most possible invariance, viz. GL(3)+-left-invariance and O(3)-right-invariance. (2) We endow the differential coordinates with a non-Euclidean structure that stems from a product Lie group of stretches and rotations. This structure admits a bi-invariant metric and thus allows for a consistent analysis via manifold-valued Riemannian statistics. This work further presents a novel shape representation based on discrete fundamental forms that is naturally invariant under Euclidean motion, namely the fundamental coordinates. We endow this representation with a Lie group structure that admits bi-invariant metrics and therefore allows for consistent analysis using manifold-valued statistics based on the Riemannian framework. Furthermore, we derive a simple, efficient, robust, yet accurate (i.e. without resorting to model approximations) solver for the inverse problem that allows for interactive applications. Beyond statistical shape modeling, the proposed framework is amenable to surface processing such as quasi-isometric flattening. Additionally, the last part of the thesis aims at shape-based, continuous disease stratification to provide means that objectify disease assessment over the current clinical practice of ordinal grading systems. Therefore, we derive the geodesic B-score, a generalization of the Euclidean B-score, in order to assess knee osteoarthritis. In this context we present a Newton-type fixed point iteration for projection onto geodesics in shape space. On the application side, we show that the derived geodesic B-score features, in comparison to its Euclidean counterpart, an improved predictive performance in assessing the risk of total knee replacement surgery.}, language = {en} } @phdthesis{HoppmannBaum, author = {Hoppmann-Baum, Kai}, title = {Mathematical programming for stable control and safe operation of gas transport networks}, publisher = {TU Berlin}, doi = {10.14279/depositonce-15837}, abstract = {The fight against climate change makes extreme but inevitable changes in the energy sector necessary.
These in turn lead to novel and complex challenges for the transmission system operators (TSOs) of gas transport networks. In this thesis, we consider four different planning problems emerging from real-world operations and present mathematical programming models and solution approaches for all of them. Due to regulatory requirements and side effects of renewable energy production, controlling today's gas networks with their involved topologies is becoming increasingly difficult. Based on the network station modeling concept for approximating the technical capabilities of complex subnetworks, e.g., compressor stations, we introduce a tri-level MIP model to determine important global control decisions. Its goal is to avoid changes in the network elements' settings while deviations from future inflow pressures as well as supplies and demands are minimized. A sequential linear programming inspired post-processing routine is run to derive physically accurate solutions w.r.t. the transient gas flow in pipelines. Computational experiments based on real-world data show that meaningful solutions are quickly and reliably determined. Therefore, the algorithmic approach is used within KOMPASS, a decision support system for the transient network control that we developed together with the Open Grid Europe GmbH (OGE), one of Europe's largest natural gas TSOs. Anticipating future use cases, we adapt the aforementioned algorithmic approach for hydrogen transport. We investigate whether the natural gas infrastructure can be repurposed and how the network control changes when energy-equivalent amounts of hydrogen are transported. Besides proving the need for purpose-built compressors, we observe that, due to the reduced linepack, the network control becomes more dynamic, compression energy increases by 440\% on average, and stricter regulatory rules regarding the balancing of supply and demand become necessary. Extreme load flows expose the technical limits of gas networks and are therefore of great importance to the TSOs. In this context, we introduce the Maximum Transportation Problem and the Maximum Potential Transport Moment Problem to determine severe transport scenarios. Both can be modeled as linear bilevel programs where the leader selects supplies and demands, maximizing the follower's transport effort. To solve them, we identify solution-equivalent instances with acyclic networks, provide variable bounds regarding their KKT reformulations, apply the big-M technique, and solve the resulting MIPs. A case study shows that the obtained scenarios exceed the maximum severity values of a provided test set by at least 23\%. OGE's transmission system is 11,540km long. Monitoring it is crucial for safe operations. To this end, we discuss the idea of using uncrewed aerial vehicles and introduce the Length-Constrained Cycle Partition Problem to optimize their routing. Its goal is to find a smallest cycle partition satisfying vertex-induced length requirements. Besides a greedy-style heuristic, we propose two MIP models. 
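The reduction of a bilevel linear program to a single-level MIP via KKT conditions and the big-M technique, as mentioned for the extreme-load-flow problems above, can be sketched generically as follows (the matrices A, B, the vectors b, d, the multipliers \lambda, the binaries u, v, and the bound M are illustrative assumptions, not the formulation used in the thesis). If, for fixed leader variables y, the follower solves \max_x \{ d^{\top}x : Ax \le b + By,\ x \ge 0 \}, the follower problem can be replaced by its KKT system with linearized complementarity:
\[
\begin{aligned}
& Ax \le b + By, \quad x \ge 0, \qquad A^{\top}\lambda \ge d, \quad \lambda \ge 0, \\
& \lambda_i \le M u_i, \quad (b + By - Ax)_i \le M(1-u_i), \quad u_i \in \{0,1\}, \\
& x_j \le M v_j, \quad (A^{\top}\lambda - d)_j \le M(1-v_j), \quad v_j \in \{0,1\}.
\end{aligned}
\]
Valid bounds on the multipliers and slacks are required to choose M, which is where the variable bounds mentioned in the abstract come into play.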
Combining these MIP models with symmetry-breaking constraints as well as valid inequalities and lower bounds from conflict hypergraphs yields a highly performant solution algorithm for this class of problems.}, language = {en} } @phdthesis{Chegini, author = {Chegini, Fatemeh}, title = {Multilevel optimization algorithm for Inverse Problem in Electrocardiography}, abstract = {The electric conductivity of cardiac tissue determines excitation propagation and is vital for quantifying ischemia and scar tissue and building personalized models. As scar tissue is generally characterized by different conduction of electrical excitation, we aim to estimate conductivity-related parameters in mathematical excitation models from endocardial mapping data, particularly the anisotropic conductivity tensor in the monodomain equation, which describes the cardiac excitation. Yet, estimating the distributed and anisotropic conductivity tensors reliably and efficiently from endocardial mapping data or electrocardiograms is a challenging inverse problem due to the computational complexity of the monodomain equation; many expensive high-resolution computations for the monodomain equation on very fine space and time discretizations are involved. Thus, we aim at building an efficient multilevel method for accelerating the estimation procedure combining electrophysiology models of different complexity, which uses a computationally cheap eikonal model in addition to the more accurate monodomain model. Distributed parameter estimation, well known as an ill-posed inverse problem, can be performed by minimizing the misfit between simulated and measured electrical activity on the endocardial surface subject to the monodomain model and some regularization, leading to a partial differential equation constrained optimization problem. We formulate this optimization problem, including scar tissue modeling and different regularizations, and design an efficient iterative solver. To this aim, we consider monodomain grid hierarchies, monodomain-eikonal model hierarchies, and the combination of both hierarchies in a recursive multilevel trust-region (RMTR) method. On the one hand, both the trust region method's estimation quality and efficiency, independent of the data, are investigated in several numerical examples. Endocardial mapping data of realistic density appears to be sufficient to provide quantitatively reasonable estimates of the location, size, and shape of scars close to the endocardial surface. In several situations, scar reconstructions based on eikonal and monodomain models differ significantly, suggesting the use of the more involved monodomain model for this purpose. Moreover, eikonal models can accelerate the computations considerably, enabling the use of complex electrophysiology models for estimating myocardial scars from endocardial mapping data. In many situations, eikonal models approximate monodomain models well but are orders of magnitude faster to solve. Thus, they can be utilized to provide an RMTR acceleration with negligible overhead per iteration, resulting in a practical approach to estimating myocardial scars from endocardial mapping data. In addition, the multilevel solver is faster than a comparable single-level solver.
On the other hand, we investigate different optimization approaches based on adjoint gradient computation for computing a maximum posterior estimate: steepest descent, limited memory BFGS, and recursive multilevel trust region methods using mesh hierarchies or heterogeneous model hierarchies. We compare overall performance, asymptotic convergence rate, and pre-asymptotic progress on selected examples in order to assess the benefit of our multifidelity acceleration.}, language = {en} } @phdthesis{Kaehler, author = {K{\"a}hler, Ralf}, title = {Accelerated Volume Rendering on Structured Adaptive Meshes}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-2005002769}, abstract = {Multi-scale phenomena are abundant in many application fields. Representing and numerically simulating such processes is a challenging task since quite different scales have to be resolved, which often requires enormous amounts of storage and computational power. An important strategy in this context is adaptivity, i.e., local adjustment of the spatio-temporal resolution to the details to be resolved. Standard representations for this purpose are hierarchical, locally refined grids. A specific adaptive approach for solving partial differential equations, usually called AMR (Adaptive Mesh Refinement), was introduced in 1984. The basic idea is to combine the simplicity of structured grids and the advantages of local refinement. In this numerical scheme the computations are started on a set of coarse, potentially overlapping structured grids that cover the computational domain. Local error criteria are applied to detect regions that require higher resolution. These are covered by subgrids with decreasing mesh spacing, which do not replace, but rather overlap the refined regions of the coarser patches. The equations are advanced on the finer subgrids and the refinement procedure recursively continues until all cells fulfill the considered error criteria, giving rise to a hierarchy of nested levels of refinement. In 1989 a variant of this scheme, called Structured Adaptive Mesh Refinement (SAMR), which reduces some of the complexity of the original approach, was proposed. While the separate subgrids in the AMR scheme could be rotated against each other, in SAMR they are aligned with the major axes of the coordinate system, which for example simplifies the computation of fluxes of (conserved) quantities through the cell faces. SAMR has become more and more popular in the last decade, and nowadays it is applied in many domains like hydrodynamics, meteorology and in particular in cosmology and relativistic astrophysics. Due to this growing popularity, an increasing number of scientists is in need of appropriate interactive visualization techniques to interpret and analyze AMR simulation data. Tools are required both for 2D analysis, to quantitatively convey the information within single slices, and for 3D representations, to apprehend the overall structure. In this thesis we develop direct and indirect volume visualization algorithms for scalar fields that are defined on structured Adaptive Mesh Refinement (SAMR) grids. In particular, algorithms for planar slicing and the display of height fields, C0-continuous isosurface extraction, software- and hardware-based direct volume rendering, and temporal interpolation for cell- and vertex-centered data on unrestricted SAMR grids are proposed.
Additionally, we investigate the applicability of SAMR data structures for accelerated software- and hardware-based volume rendering of large 3D scalar data.}, language = {en} } @phdthesis{Streubel, author = {Streubel, Tom}, title = {Simulation of Piecewise Smooth Differential Algebraic Equations with Application to Gas Networks}, doi = {http://dx.doi.org/10.18452/24688}, school = {Humboldt-Universit{\"a}t zu Berlin}, language = {en} } @phdthesis{Grewe, author = {Grewe, Martin}, title = {An Extended 3D Morphable Face Model with Applications in Experimental Psychology}, pages = {187}, abstract = {Our faces and facial expressions are an important means of communication and social interaction. One goal of the behavioral sciences is to better understand how the features of the faces that we look at influence our behavior. These include static features like facial proportions or the shape and color of certain parts of a face which primarily constitute facial identity, as well as dynamic movements resulting from the activation of the mimic musculature. Experimental psychology provides an empirical approach to this endeavor. In experiments, participants are typically exposed to images or videos of realistic faces with specifically controlled features. By analysis of the reactions to such stimuli, conclusions can be drawn about the influence of facial features on the participants' behavior. Psychologists today mostly generate face stimuli with the help of digital tools. Image editing with Photoshop is highly flexible, but also time-consuming and subjective. Using tools like Psychomorph or Fantamorph is easier and more objective, but does not allow specific control over facial features. In contrast, stimulus generation with 3D Morphable Face Models (3DMMs) offers a better balance between objectivity, ease of use, and flexibility. 3DMMs are statistical models which have been determined from 3D scans of real people's faces and facial expressions. After these training scans have been brought into correspondence, methods like principal component analysis (PCA) can be used to determine the major modes of variation of facial shape and texture in the data. Such modes typically vary the overall facial proportions, expressions, or skin color. They can be individually controlled and flexibly combined to generate new faces and facial expressions. The plausibility of the generated faces can be ensured by having the mode combinations follow the multivariate distribution of the training data. 3DMMs have been mostly used by psychologists for the generation of stimulus images of faces with neutral expression. Static and dynamic stimuli of facial expressions are also of great interest, but generation with 3DMMs is less common. A problem is that the majority of current 3DMMs can only generate facial movements according to the six prototypic expressions of anger, disgust, fear, happiness, sadness, and surprise. More diverse or subtle expressions are often impossible. Among other reasons, this is due to the difficulty in establishing accurate correspondence in the training data. Further, the modes of most 3DMMs were created by means of PCA. These modes often lack interpretability, fail to generate facial details, and rarely provide psychologists with specific control over identity or expression features. Some 3DMMs also generate subtle artifacts that might lead to undesired effects during face perception.
They are also less realistic than faces which were designed by artistic experts for recent computer games and animated movies. Last but not least, current 3DMMs have probably not yet been used for interactive experiments in virtual reality (VR) for technical reasons. Although they provide many advantages also beyond the generation of static or dynamic stimuli, the limitations of current 3DMMs have so far prevented a widespread usage in experimental psychology. The goal of this dissertation is to foster the creation and usage of 3DMMs in this context. To this end, we make three major contributions. First, we describe a matching method that establishes correspondence for 3D face scans with a very high accuracy. Unlike the most commonly used methods, it transforms the facial features into a 2D intermediate representation so that they can be aligned to a reference using image registration. We perform experiments with a large database of 3D scans of faces and facial expressions showing that our method outperforms previous approaches. Second, the 3D scans which were previously brought into correspondence are used for the creation of a 3DMM whose resolution is an order of magnitude higher than that of most existing models. We learn a variety of meaningful modes that, e.g., vary features only in specific regions of the face, or that are related to demographic factors such as ethnicity and age. Further, modes of local facial movements are established that can be flexibly combined into a large variety of expressions. We evaluate the quality of the newly created 3DMM in two experiments. Our results show its advantages over previous models, especially the higher degree of realism of dynamic stimuli of facial expressions which were created with our model. Third, we demonstrate that 3DMMs can not only be used for the generation of stimuli. We develop two experimental methods that are readily applicable in experimental psychology. Initially, we create 3D avatar faces with our 3DMM that are readily applicable in VR. They are used in a new open source framework for virtual mirror experiments on self-face perception. A study is conducted which demonstrates the advantages of the framework over previous methods. Furthermore, our 3DMM is used to create a method for improved control of facial asymmetry in existing stimulus photographs. We show that the method accounts for different dimensions of facial asymmetry and is less sensitive than previous approaches to extrinsic factors like the posture of the head. The different methods are evaluated in a study investigating the influence of facial asymmetry on ratings of attractiveness, femininity, and masculinity. The results indicate the benefits and validity of our method.}, language = {en} } @phdthesis{Hennings, author = {Hennings, Felix}, title = {Modeling and solving real-world transient gas network transport problems using mathematical programming}, abstract = {This thesis considers the transient gas network control optimization problem for on-shore pipeline-based transmission networks with numerous gas routing options. As input, the problem is given the network's topology, its initial state, and future demands at the boundaries of the network, which prescribe the gas flow exchange and potentially the pressure values. The task is to find a set of future control measures for all the active, i.e., controllable, elements in the network that minimizes a combination of different penalty functions. 
The problem is examined in the context of a decision support tool for gas network dispatchers. This results in detailed models featuring a diverse set of constraints, large and challenging real-world instances, and demanding time limit requirements. All these factors further complicate the problem, which is already difficult to solve in theory due to the inherent combination of non-linear and combinatorial aspects. Our contributions concern different steps of the process of solving the problem. Regarding the model formulation, we investigate the validity of two common approximations of the gas flow description in transport pipes: neglecting the inertia term and assuming a friction term that linearly depends on the gas flow and the pressure. For both, we examine if they can be applied under real-world conditions by evaluating a large amount of historical state data of the network of our project partner, the gas network operator Open Grid Europe. While we can confirm that it is reasonable to ignore the influence of the inertia term, the friction term linearization leads to significant errors and, as a consequence, cannot be used for describing the general gas flow behavior in transport pipes. As another topic of this thesis, we introduce the target value concept as a more realistic approach to express control actions of dispatchers regarding regulators and compressor stations. Here, we derive the mechanisms defined for target values based on the gas flow principles in pipes and develop a mixed-integer programming model capturing their behavior. The accuracy of this model is demonstrated in comparison to a target-value-based industry-standard simulator. Furthermore, we present two heuristics for the transient gas network control optimization problem featuring target values that are based on approximative models for the target-value-based control and determine the final decisions in a post-processing step. To compare the performance of the two heuristics with the approach of directly solving the corresponding model, we evaluate them on a set of artificially created test instances. Finally, we develop problem-specific algorithms for two variants of the described problem. One considers the control optimization for a single network station, which represents a local operation site featuring a large number of active elements. The transient model used is very detailed and includes a sophisticated representation of the compressor stations. Exploiting the shortness of the pipes in the station, the corresponding algorithm finds valid solutions by solving a series of stationary model variants as well as by using a transient rolling horizon approach. As the second variant, we consider the problem on the entire network but assume an approximative model representing the control capabilities of network stations. Aside from a new description of the compression capabilities, we introduce an algorithm that uses a combination of sequential mixed-integer programming, two heuristics based on reduced time horizons, and a specialized dynamic branch-and-bound node limit to determine promising values for the binary variables of the model. Complete solutions for the problem are obtained by fixing the binary values and solving the remaining non-linear program.
Both algorithms are investigated in extensive empirical studies based on real-world instances of the corresponding model variants.}, language = {en} } @phdthesis{Iravani, author = {Iravani, Sahar}, title = {Interpretable Deep Learning Approaches for Biomarker Detection from High-Dimensional Biomedical Data}, language = {en} } @phdthesis{Conrad, author = {Conrad, Tim}, title = {Metabolic Pathways}, language = {en} } @phdthesis{Rams, author = {Rams, Mona Milena}, title = {New approaches for unsupervised transcriptomic data analysis based on Dictionary learning}, language = {en} } @phdthesis{Helfmann, author = {Helfmann, Luzie}, title = {Non-stationary Transition Path Theory with applications to tipping and agent-based models}, doi = {http://dx.doi.org/10.17169/refubium-35374}, abstract = {The interesting dynamical regimes in agent-based models (ABMs) of social dynamics are the transient dynamics leading to metastable or absorbing states, and the transition paths between metastable states possibly caused by external influences. In this thesis, we are particularly interested in the pathways of rare and critical transitions such as the tipping of the public opinion in a population or the forming of social movements. For a detailed quantitative analysis of these transition paths, we consider the agent-based models as Markov chains and employ Transition Path Theory. Since ABMs are usually not considered in stationarity and possibly even forced, we generalize Transition Path Theory to time-dependent dynamics, for example on finite-time intervals or with periodically varying transition probabilities. We also specifically consider the case of dynamics with absorbing states and show how the transitions prior to absorption can be studied. These generalizations can also be useful in other application domains such as for studying tipping in climate models or transitions in molecular models with external stimuli. Another obstacle when analysing the dynamics of agent-based models is the large number of agents resulting in a high-dimensional state space for the model. However, the emergent dynamics of the ABM usually has significantly fewer degrees of freedom and many symmetries enabling a model reduction. On the example of two stationary ABMs we demonstrate how a long model simulation can be employed to find a lower-dimensional parametrization of the state space using a manifold learning algorithm called Diffusion Maps. In the considered models, agents adapt their binary behaviour to the local neighbourhood. When the interaction network consists of several densely connected communities, the dynamics result in a largely coherent behaviour in each community. The low-dimensional structure of the state space is therefore a hypercube. The corners represent metastable states with coherent agent behaviour in each group and the edges correspond to transition paths where agents in a community change their behaviour through a chain reaction. 
Finally, we can apply Transition Path Theory to the effective dynamics in the reduced space to reveal, for example, the dominant transition paths or the agents that are most indicative of an impending tipping event.}, language = {en} } @phdthesis{Schuette1999, author = {Sch{\"u}tte, Christof}, title = {Partial Wigner transforms and the quantum-classical Liouville equation}, year = {1999}, language = {en} } @phdthesis{Pfeuffer, author = {Pfeuffer, Julianus}, title = {Computational Methods for Protein Inference in Shotgun Proteomics Experiments}, abstract = {Since the beginning of this millennium, the advent of high-throughput methods in numerous fields of the life sciences has led to a paradigm shift. A broad variety of technologies emerged that allow comprehensive quantification of molecules involved in biological processes. Simultaneously, a major increase in data volume has been recorded with these techniques through enhanced instrumentation and other technical advances. By supplying computational methods that automatically process raw data to obtain biological information, the field of bioinformatics plays an increasingly important role in the analysis of the ever-growing mass of data. Computational mass spectrometry, in particular, is a bioinformatics field of research which provides means to gather, analyze and visualize data from high-throughput mass spectrometric experiments. For the study of the entirety of proteins in a cell or an environmental sample, even current techniques reach limitations that need to be circumvented by simplifying the samples subjected to the mass spectrometer. These pre-digested (so-called bottom-up) proteomics experiments then pose an even bigger computational burden during analysis since complex ambiguities need to be resolved during protein inference, grouping and quantification. In this thesis, we present several developments in the pursuit of our goal to provide means for a fully automated analysis of complex and large-scale bottom-up proteomics experiments. Firstly, due to prohibitive computational complexities in state-of-the-art Bayesian protein inference techniques, a refined, more stable technique for performing inference on sums of random variables was developed to enable a variation of standard Bayesian inference for the problem. Furthermore, an automated analysis workflow was developed, implemented in nextflow and provided as part of a set of standardized, well-tested, and community-maintained workflows by the nf-core collective. Our workflow runs on large-scale data with complex experimental designs and allows a one-command analysis of local and publicly available data sets with state-of-the-art accuracy on various high-performance computing environments or the cloud.}, language = {en} } @phdthesis{Miltenberger, author = {Miltenberger, Matthias}, title = {Linear Programming in MILP Solving - A Computational Perspective}, publisher = {Verlag Dr. Hut GmbH}, isbn = {9783843953238}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-91873}, pages = {237}, abstract = {Mixed-integer linear programming (MILP) plays a crucial role in the field of mathematical optimization and is especially relevant for practical applications due to the broad range of problems that can be modeled in that fashion. The vast majority of MILP solvers employ the LP-based branch-and-cut approach. As the name suggests, the linear programming (LP) subproblems that need to be solved therein influence their behavior and performance significantly.
This thesis explores the impact of various LP solvers as well as LP solving techniques on the constraint integer programming framework SCIP Optimization Suite. SCIP allows for comparisons between academic and open-source LP solvers like Clp and SoPlex, as well as commercially developed, high-end codes like CPLEX, Gurobi, and Xpress. We investigate how the overall performance and stability of an MILP solver can be improved by new algorithmic enhancements like LP solution polishing and persistent scaling that we have implemented in the LP solver SoPlex. The former decreases the fractionality of LP solutions by selecting another vertex on the optimal hyperplane of the LP relaxation, exploiting degeneracy. The latter provides better numerical properties for the LP solver throughout the MILP solving process by preserving and extending the initial scaling factors, effectively also improving the overall performance of SCIP. Both enhancement techniques are activated by default in the SCIP Optimization Suite. Additionally, we provide an analysis of numerical conditions in SCIP through the lens of the LP solver by comparing different measures and how these evolve during the different stages of the solving process. A side effect of our work on this topic was the development of TreeD: a new and convenient way of presenting the search tree interactively and animated in the three-dimensional space. This visualization technique facilitates a better understanding of the MILP solving process of SCIP. Furthermore, this thesis presents the various algorithmic techniques like the row representation and iterative refinement that are implemented in SoPlex and that distinguish the solver from other simplex-based codes. Although it is often not as performant as its competitors, SoPlex demonstrates the ongoing research efforts in the field of linear programming with the simplex method. Aside from that, we demonstrate the rapid prototyping of algorithmic ideas and modeling approaches via PySCIPOpt, the Python interface to the SCIP Optimization Suite. This tool allows for convenient access to SCIP's internal data structures from the user-friendly Python programming language to implement custom algorithms and extensions without any prior knowledge of SCIP's programming language C. TreeD is one such example, demonstrating the use of several Python libraries on top of SCIP. PySCIPOpt also provides an intuitive modeling layer to formulate problems directly in the code without having to utilize another modeling language or framework. All contributions presented in this thesis are readily accessible in source code in SCIP Optimization Suite or as separate projects on the public code-sharing platform GitHub.}, language = {en} } @phdthesis{Fackeldey2009, author = {Fackeldey, Konstantin}, title = {The Weak Coupling Method for Coupling Continuum Mechanics with Molecular Dynamics}, year = {2009}, language = {en} } @phdthesis{Donati2019, author = {Donati, Luca}, title = {Reweighting methods for Molecular Dynamics}, publisher = {Freie Universit{\"a}t Berlin}, doi = {10.17169/refubium-2305}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-refubium-24541-6}, year = {2019}, abstract = {The dynamical response of molecular systems, when the potential energy function is perturbed at a microscopic level, is difficult to predict without a numerical or laboratory experiment. This is due to the non-linearity and high-dimensionality of molecular systems. 
An efficient investigation of such behaviour is necessary to better understand the nature of molecules and to improve the predictability of Molecular Dynamics simulations. In this thesis we propose a reweighting scheme for Markov State Models (MSMs), based on the Girsanov theorem, that makes it possible to reduce the computational cost of the analysis when the potential energy function of a molecule is perturbed. The method has been successfully extended and implemented with metadynamics, in order to build the MSM of a molecular system in a significantly shorter computational time compared to a standard unbiased MD simulation. We also propose a new method to discretize the infinitesimal generator into a rate matrix, which could be used to efficiently study Hamiltonian perturbations as well.}, language = {en} } @phdthesis{Tillmann, author = {Tillmann, Peter}, title = {Optimizing bifacial tandem solar cells for realistic operation conditions}, doi = {10.17169/refubium-39571}, language = {en} } @phdthesis{Dercksen2015, author = {Dercksen, Vincent J.}, title = {Visual computing techniques for the reconstruction and analysis of anatomically realistic neural networks}, year = {2015}, abstract = {To understand how the brain translates sensory input into behavior, one needs to identify, at the cellular level, the involved neural circuitry and the electrical signals it carries. This thesis describes methods and tools that enable neuroscientists to obtain important anatomical data, including neuron numbers and shapes, from 3D microscopy images. On this basis, tools have been developed to create and visually analyze anatomically realistic 3D models of neural networks: 1. An automatic segmentation method for determining the number and location of neuron cell bodies in 3D microscopy images. Application of this method yields a difference of merely ∼4\% between automatically and manually counted cells, which is sufficiently accurate for application in large-scale counting experiments. 2. A method for the automatic alignment of 3D section volumes containing filamentous structures. To this end, an existing point-matching-based method has been adapted such that sections containing neuron and microtubule fragments could be successfully aligned. 3. The Filament Editor, a 3D proof-editing tool for visual verification and correction of automatically traced filaments. The usefulness of the Filament Editor is demonstrated by applying it in a validated neuron reconstruction pipeline to create 3D models of long-range and complex neuronal branches. 4. The tool NeuroNet, which is used to assemble an anatomical model of a neural network representing the rat barrel cortex (or subnetworks therein, e.g. individual cortical columns), based on reconstructed anatomical data, such as neuron distributions and 3D morphologies. The tool estimates synaptic connectivity between neurons based on structural overlap between axons and dendrites. 5. A framework for the interactive visual analysis of synaptic connectivity in such networks at multiple scales. It works from the level of neuron populations down to individual synapse positions on dendritic trees. It comprises the Cortical Column Connectivity Viewer, developed to analyze synaptic connections between neuron populations within and between cortical columns. The usefulness of these methods is demonstrated by applying them to reconstruct and analyze neural networks in the rat barrel cortex.
Finally, I describe several applications of these methods and tools by neuroscientists, yielding significant biological findings regarding neuron anatomy and connectivity.}, language = {en} } @phdthesis{Hanik2023, author = {Hanik, Martin}, title = {Geometric Data Analysis: Advancements of the Statistical Methodology and Applications}, publisher = {Refubium}, doi = {10.17169/refubium-39809}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-refubium-40087-8}, pages = {192}, year = {2023}, abstract = {Data analysis has become fundamental to our society and comes in multiple facets and approaches. Nevertheless, in research and applications, the focus was primarily on data from Euclidean vector spaces. Consequently, the majority of methods that are applied today are not suited for more general data types. Driven by needs from fields like image processing, (medical) shape analysis, and network analysis, more and more attention has recently been given to data from non-Euclidean spaces---particularly (curved) manifolds. It has led to the field of geometric data analysis whose methods explicitly take the structure (for example, the topology and geometry) of the underlying space into account. This thesis contributes to the methodology of geometric data analysis by generalizing several fundamental notions from multivariate statistics to manifolds. We thereby focus on two different viewpoints. First, we use Riemannian structures to derive a novel regression scheme for general manifolds that relies on splines of generalized B{\'e}zier curves. It can accurately model non-geodesic relationships, for example, time-dependent trends with saturation effects or cyclic trends. Since B{\'e}zier curves can be evaluated with the constructive de Casteljau algorithm, working with data from manifolds of high dimensions (for example, a hundred thousand or more) is feasible. Relying on the regression, we further develop a hierarchical statistical model for an adequate analysis of longitudinal data in manifolds, and a method to control for confounding variables. We secondly focus on data that is not only manifold- but even Lie group-valued, which is frequently the case in applications. We can only achieve this by endowing the group with an affine connection structure that is generally not Riemannian. Utilizing it, we derive generalizations of several well-known dissimilarity measures between data distributions that can be used for various tasks, including hypothesis testing. Invariance under data translations is proven, and a connection to continuous distributions is given for one measure. A further central contribution of this thesis is that it shows use cases for all notions in real-world applications, particularly in problems from shape analysis in medical imaging and archaeology. We can replicate or further quantify several known findings for shape changes of the femur and the right hippocampus under osteoarthritis and Alzheimer's, respectively. Furthermore, in an archaeological application, we obtain new insights into the construction principles of ancient sundials. Last but not least, we use the geometric structure underlying human brain connectomes to predict cognitive scores.
Utilizing a sample selection procedure, we obtain state-of-the-art results.}, language = {en} } @phdthesis{Kratz, author = {Kratz, Andrea}, title = {Three-Dimensional Second-Order Tensor Fields: Exploratory Visualization and Anisotropic Sampling}, school = {Freie Universit{\"a}t Berlin}, pages = {XIV, 157 S.}, abstract = {Tensors provide a powerful mathematical language to describe physical phenomena. Consequently, they have a long tradition in physics and appear in various application areas, either as intermediate product or as output of simulations or measurements. The potential of tensors to describe complex anisotropic behavior, however, concurrently complicates their interpretation. The central research question of this thesis is how three-dimensional tensor fields of second order are visualized effectively so that, as a long-term goal, their interpretation becomes easier. The focus of this thesis lies on the class of indefinite tensors. The methods that are proposed in this thesis fall into two main categories: (1.) the interactive exploration of the three-dimensional tensor data, and (2.) the geometric reduction of the data to two-dimensional planes or triangulated surfaces. In both cases, possible visualization approaches are presented. For interactive exploration of the data, we propose to combine diagram views with three-dimensional hybrid visualizations. We show that this facilitates becoming familiar with the data and leads to exciting analytic queries. If a geometric data reduction is possible, we focus on glyph- and texture-based methods. In this context, the thesis is concerned with methods to improve their quality. Therefore, we propose two algorithms for the efficient creation of anisotropic sample distributions. Moreover, we present a novel visualization method that works on planar slices as well as on triangulated surfaces. The basic idea of this method is to use anisotropic sample distributions for the efficient computation of anisotropic Voronoi cells, which then are used as base elements for texture mapping. Hence, the usage of textures to encode the tensor's various degrees of freedom becomes possible. We evaluate our methods for the interactive exploration on stress tensor fields from structure simulations. To show the effectiveness of the novel visualization methods, various datasets are presented.}, language = {en} } @phdthesis{Daragmeh, author = {Daragmeh, Adman}, title = {Model Order Reduction of Linear Control Systems: Comparison of Balance Truncation and Singular Perturbation Approximation with Application to Optimal Control}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-fudissthesis000000102586-2}, language = {en} } @phdthesis{Zonker, author = {Zonker, Johannes}, title = {Coarse Graining of Agent-Based Models and Spatio-Temporal Modeling of Spreading Processes}, language = {en} } @phdthesis{Binkowski, author = {Binkowski, Felix}, title = {Riesz-projection-based methods for the numerical simulation of resonance phenomena in nanophotonics}, doi = {10.17169/refubium-41230}, language = {en} } @phdthesis{Turner, author = {Turner, Mark}, title = {Cutting Plane Selection for Mixed-Integer Linear Programming}, abstract = {Mixed-Integer Linear Programming (MILP) is a ubiquitous and practical modelling paradigm that is essential for optimising a broad range of real-world systems. The backbone of all modern MILP solvers is the branch-and-cut algorithm, which is a hybrid of the branch-and-bound and cutting planes algorithms.
Cutting planes (cuts) are linear inequalities that tighten the relaxation of a MILP. While a lot of research has gone into deriving valid cuts for MILPs, less emphasis has been put on determining which cuts to select. Cuts in general are generated in rounds, and a subset of the generated cuts must be added to the relaxation. The decision on which subset of cuts to add is called cut selection. This is a crucial task since adding too many cuts makes the relaxation large and slow to optimise over. Conversely, adding too few cuts results in an insufficiently tightened relaxation, and more relaxations need to be enumerated. To further emphasise the difficulty, the effectiveness of an applied cut depends both on the other applied cuts and on the state of the MILP solver. In this thesis, we present theoretical results on the importance and difficulty of cut selection, as well as practical results that use cut selection to improve general MILP solver performance. Improving general MILP solver performance is of great importance for practitioners and has many runoff effects. Reducing the solve time of currently solved systems can directly improve efficiency within the application area. In addition, improved performance enables larger systems to be modelled and optimised, and MILP to be used in areas where it was previously impractical due to time restrictions. Each chapter of this thesis corresponds to a publication on cut selection, and the contributions of this thesis can naturally be divided into four components. The first two components are motivated by instance-dependent performance. In practice, for each subroutine, including cut selection, MILP solvers have adjustable parameters with hard-coded default values. It is ultimately unrealistic to expect these default values to perform well for every instance. Rather, it would be ideal if the parameters were dependent on the given instance. To show that this motivation is well founded, we first introduce a family of parametric MILP instances and cuts to showcase worst-case performance of cut selection for any fixed parameter value. We then introduce a graph neural network architecture and reinforcement learning framework for learning instance-dependent cut scoring parameters. In the following component, we formalise language for determining if a cut has theoretical usefulness from a polyhedral point of view in relation to other cuts. In addition, to overcome issues of infeasible projections and dual degeneracy, we introduce analytic center based distance measures. We then construct a lightweight multi-output regression model that predicts relative solver performance of an instance for a set of distance measures. The final two components are motivated by general MILP solver improvement via cut selection. Such improvement was shown to be possible, albeit difficult to achieve, by the first half of this thesis. We relate branch-and-bound and cuts through their underlying disjunctions. Using a history of previously computed Gomory mixed-integer cuts, we reduce the solve time of SCIP by 4\% over the 67\% of affected MIPLIB 2017 instances. In the final component, we introduce new cut scoring measures and filtering methods based on information from other MILP solving processes.
The new cut selection techniques reduce the solve time of SCIP by 5\% over the 97\% of affected MIPLIB 2017 instances.}, language = {en} } @phdthesis{Chewle, author = {Chewle, Surahit}, title = {Probing effects of organic solvents on paracetamol crystallization using in silico and orthogonal in situ methods}, abstract = {Polymorphism is the property exhibited by many inorganic and organic molecules to crystallize in more than one crystal structure. There is a strong need for understanding the factors influencing polymorphism, as it is responsible for differences in many physicochemical properties such as stability and solubility. Nearly 80\% of marketed drugs exhibit polymorphism. In this work, we took the model system of paracetamol to investigate the influence of solvent choice on its polymorphism. Different methods were developed and employed to understand the influence of small organic solvents on the crystallization of paracetamol. Non-equilibrium molecular dynamics simulations with periodic simulated annealing were used as a tool to probe the nature of precursors of the metastable intermediates occurring in the crystallization process. Using this method, it was found that the structures of the building blocks of crystals of paracetamol are governed by solvent-solute interactions. In situ Raman spectroscopy was used with a custom-made acoustic levitator to follow crystallization. This set-up is a reliable method for investigating solvent influence, attenuating heterogeneous nucleation and stabilizing other environmental factors. It was established that, as a solvent, ethanol is much stronger than methanol in driving paracetamol solutions to their crystal form. The time-resolved Raman spectroscopy crystallization data was processed using a newly developed objective-function-based non-negative matrix factorization (NMF) method. Orthogonal time-lapse photography was used in conjunction with NMF to obtain unique and accurate factors that pertain to the spectra and concentrations of different moieties of paracetamol crystallization existing as latent components in the untreated data.}, language = {en} } @phdthesis{Niemann, author = {Niemann, Jan-Hendrik}, title = {Learning Reduced Models for Large-Scale Agent-Based Systems}, doi = {10.17169/refubium-35245}, url = {http://nbn-resolving.de/urn:nbn:de:kobv:188-refubium-35530-6}, pages = {130}, abstract = {Modeling social systems and studying their dynamical behavior plays an important role in many fields of research. Agent-based modeling provides a high degree of detail into artificial societies by describing the model from the perspective of the agents. The interactions of agents, often characterized by simple rules, lead to complex, time-evolving patterns. Their understanding is of great importance, e.g., for predicting and influencing epidemics. Analysis and simulation, however, often becomes prohibitively time-consuming when the number of agents or the time scale of interest is large. Therefore, this thesis is devoted to learning significantly reduced models of large-scale agent-based systems from simulation data. We show how data-driven methods based on transfer operators can be used to find reduced models represented by ordinary or stochastic differential equations that describe the dynamical behavior of larger groups or entire populations and thus enable the analysis and prediction of agent-based systems.
To this end, we first present an extension of EDMD (extended dynamic mode decomposition) called gEDMD to approximate the Koopman generator from data. This method can be used to compute eigenfunctions, eigenvalues, and modes of the generator, as well as for system identification and model reduction of both deterministic and non-deterministic dynamical systems. Secondly, we analyze the long-term behavior of certain agent-based models and their pathwise approximations by stochastic differential equations for large numbers of agents using transfer operators. We show that, under certain conditions, the transfer operator approach connects the pathwise approximations on finite time scales with methods for describing the behavior on possibly exponentially long time scales. As a consequence, we can use the finite-time, pathwise approximations to characterize metastable behavior on long time scales using transfer operators. This can significantly reduce the computational cost. The third part addresses the data-driven model reduction since in many cases no analytical limit models are known or existent. We show how the Koopman operator theory can be used to infer the governing equations of agent-based systems directly from simulation data. Using benchmark problems, we demonstrate that for sufficiently large population sizes the data-driven models agree well with analytical limit equations and, moreover, that the reduced models allow predictions even in cases far from the limit or when no limit equations are known. Lastly, we demonstrate the potential of the presented approach. We present an ansatz for the multi-objective optimization of agent-based systems with the help of data-driven surrogate models based on the Koopman generator. In particular, when limit models are unknown or non-existent, this approach makes multi-objective optimization problems solvable that would otherwise be computationally infeasible due to very expensive objective functions.}, language = {en} } @phdthesis{Tack, author = {Tack, Alexander}, title = {Machine Learning-based Assessment of Multiple Anatomical Structures in Medical Image Data for Diagnosis and Prediction of Knee Osteoarthritis}, doi = {10.14279/depositonce-19738}, abstract = {Knee osteoarthritis (KOA) is a degenerative disease that leads to pain and loss of function. It is estimated to affect over 500 million humans world-wide and is one of the most common reasons for disability. KOA is usually diagnosed by radiologists or clinical experts by anamnesis, physical examination, and by assessing medical image data. The latter is typically acquired using X-Ray or magnetic resonance imaging. Since manual image reading is subjective, tedious and time-consuming, automated methods are required for a fast and objective decision support and for a better understanding of the pathogenesis of KOA. This thesis sets a foundation towards automated computation of image-based KOA biomarkers for holistic assessment of the knee. This involves the assessment of multiple knee bones and soft tissues. An assessment of particular structures requires localization of these tissues. In order to automate a faithful localization of anatomical structures, deep learning-based methods are investigated and utilized. Additionally, convolutional neural networks (CNNs) are used for classification of medical image data, i.e., for a direct determination of the disease status and to detect anatomical structures and landmarks. 
The automatically computed anatomical volumes, locations, and other measurements are finally compared to values acquired by clinical experts and evaluated for clustering of KOA groups, classification of KOA severity, prediction of KOA progression, and prediction of total knee replacement. In various experiments it is shown that CNN-based methods are suitable for accurate medical image segmentation, object detection, landmark detection, and direct classification of disease stages from the image data. Computed features related to the menisci are found to be most expressive in terms of clustering of KOA groups and prediction of future disease states, thus allowing diagnosis of current KOA conditions and prediction of future conditions. The conclusion of this thesis is that machine learning-based, fully automated processing of medical image data shows potential for diagnosis and prediction of KOA grades. Future studies could investigate additional features in order to achieve an assessment of the whole knee or validate the findings of this work in clinical studies.}, language = {en} } @phdthesis{Lenz, author = {Lenz, Ralf}, title = {Optimization of Stationary Expansion Planning and Transient Network Control by Mixed-Integer Nonlinear Programming}, doi = {10.14279/depositonce-12765}, language = {en} } @phdthesis{Rehfeldt, author = {Rehfeldt, Daniel}, title = {Faster algorithms for Steiner tree and related problems: From theory to practice}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-85148}, language = {en} } @phdthesis{Danecker, author = {Danecker, Fabian}, title = {A Discrete-Continuous Algorithm for Globally Optimal Free Flight Trajectory Optimization}, abstract = {This thesis introduces the novel hybrid algorithm DisCOptER for globally optimal flight planning. DisCOptER (Discrete-Continuous Optimization for Enhanced Resolution) combines discrete and continuous optimization in a two-stage approach to find optimal trajectories up to arbitrary precision in finite time. In the discrete phase, a directed auxiliary graph is created in order to define a set of candidate paths that densely covers the relevant part of the trajectory space. Then, Yen's algorithm is employed to identify a set of promising candidate paths. These are used as starting points for the subsequent stage in which they are refined with a locally convergent optimal control method. The correctness, accuracy, and complexity of DisCOptER are intricately linked to the choice of the switch-over point, defined by the discretization coarseness. Only a sufficiently dense graph enables the algorithm to find a path within the convex domain surrounding the global minimizer. Initialized with such a path, the second stage rapidly converges to the optimum. Conversely, an excessively dense graph poses the risk of overly costly and redundant computations. The determination of the optimal switch-over point necessitates a profound understanding of the local behavior of the problem, the approximation properties of the graph, and the convergence characteristics of the employed optimal control method. These topics are explored extensively in this thesis. Crucially, the density of the auxiliary graph is solely dependent on the environmental conditions, yet independent of the desired solution accuracy. As a consequence, the algorithm inherits the superior asymptotic convergence properties of the optimal control stage.
The practical implications of this computational efficiency are demonstrated in realistic environments, where the DisCOptER algorithm consistently delivers highly accurate, globally optimal trajectories. This notable improvement upon existing approaches underscores the algorithm's significance. Beyond its technical prowess, the DisCOptER algorithm stands as a valuable tool contributing to the reduction of costs and the overall enhancement of flight operations efficiency.}, language = {en} }