@phdthesis{Lehner2025, author = {Lehner, Constanze}, title = {Three Essays on the Interpretability of Random Forests: Methods, Insights, and Innovations}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-18608}, school = {Universit{\"a}t Passau}, pages = {126 pages}, year = {2025}, abstract = {This thesis examines the interpretability of random forests in three essays, focusing on the discussion of established methods, the presentation of new insights, and the provision of innovations. The research aims to bridge the gap between traditional statistical methods and random forests, a data-driven machine learning algorithm, by investigating the ability of random forests to adequately model theoretical concepts compared to parametric methods and by developing a new approach to statistical hypothesis testing. The results have implications for various areas of research in which interpretability is crucial for applications. The next three paragraphs summarize the studies presented in this thesis.

The ability of random forests to automatically model interactions without the need for pre-specification is mentioned prominently in many articles and book chapters. Promising empirical results from early work on random forests have substantiated this property, which has led to the increasing popularity of random forests as an alternative to traditional parametric methods in the presence of interactions. This study reviews the literature of the last 20 years on random forests and interactions. We explore the discussion from its origin in the decision tree literature to early applications of random forests and current research. We identify key research areas and illustrate random forest applications to highlight similarities and differences between disciplines. We also provide a critical examination of the arguments in favor of random forests being able to model interactions automatically. Since the term ``interaction'' is associated with different theoretical concepts, we explain and illustrate the definition of interaction for each research area.

The variable importance of random forests is an easy-to-understand metric that aims to make the predictions of random forests more transparent by assessing the contribution of each covariate to the prediction of the response. However, due to its data-driven nature, the variable importance of random forests may over- or underestimate the importance of a covariate, so that the role of the covariate in the underlying data-generating process is not correctly reflected. We present an example of underestimation of importance in the case of interacting covariates. We define an interaction in terms of effect modification, which assumes that the effect of one covariate on the response is modified by the values of another covariate. We show that the variable importance of random forests is influenced by the interaction form and the measurement scale of the interacting covariates, so that in some cases the importance of one or even both interacting covariates is underestimated. We illustrate how the split decisions of random forests affect the variable importance values of the interacting covariates.

Variable importance estimates the contribution of a covariate to the performance of a predictive algorithm.
Defined as the increase in loss after the random permutation of a covariate, permutation variable importance makes it possible to rank the covariates by importance, but in the absence of a threshold, the distinction between important and unimportant covariates is inherently arbitrary. We show that recent non-parametric permutation tests for variable importance exceed their nominal type I error level for mutually dependent covariates. As an alternative, we propose a combined variable importance estimate based on a sequence of permutations and employ a computationally more efficient bootstrap to derive the respective null distribution and $p$-values. The proposed test can be applied to any predictive algorithm and is remarkably fast. We investigate the type I error control and the power of the proposed variable importance test in simulation studies. Even for mutually dependent covariates, our test is conservative and provides power comparable to recent advances in non-parametric permutation tests of variable importance. This study was conducted in collaboration with Matthias Wild.}, language = {en} }

@article{HarksSchwarz2024, author = {Harks, Tobias and Schwarz, Julian}, title = {Generalized Nash equilibrium problems with mixed-integer variables}, series = {Mathematical Programming}, volume = {209}, journal = {Mathematical Programming}, number = {1}, publisher = {Springer Berlin Heidelberg}, address = {Berlin/Heidelberg}, issn = {0025-5610}, doi = {10.1007/s10107-024-02063-6}, url = {http://nbn-resolving.de/urn:nbn:de:101:1-2405152117292.567771401889}, pages = {231 -- 277}, year = {2024}, abstract = {We consider generalized Nash equilibrium problems (GNEPs) with non-convex strategy spaces and non-convex cost functions. This general class of games includes the important case of games with mixed-integer variables, for which only a few results are known in the literature. We present a new approach to characterize equilibria via a convexification technique using the Nikaido-Isoda function. For any given instance of the GNEP, we construct a set of convexified instances and show that a feasible strategy profile is an equilibrium for the original instance if and only if it is an equilibrium for any convexified instance and the convexified cost functions coincide with the initial ones. We develop this convexification approach along three dimensions: We first show that for quasi-linear models, where a convexified instance exists in which, for fixed strategies of the opponent players, the cost function of every player is linear and the respective strategy space is polyhedral, the convexification reduces the GNEP to a standard (non-linear) optimization problem. Secondly, we derive two complete characterizations of those GNEPs for which the convexification leads to a jointly constrained or a jointly convex GNEP, respectively. These characterizations require new concepts related to the interplay of the convex hull operator applied to restricted subsets of feasible strategies and may be interesting in their own right. Note that this characterization is also computationally relevant, as jointly convex GNEPs have been extensively studied in the literature.
Finally, we demonstrate the applicability of our results by presenting a numerical study on the computation of equilibria for three classes of GNEPs related to integral network flows and discrete market equilibria.}, language = {en} }

@article{FritschHauptSchnurbus2024, author = {Fritsch, Markus and Haupt, Harry and Schnurbus, Joachim}, title = {Efficiency of poll-based multi-period forecasting systems for German state elections}, series = {International Journal of Forecasting}, volume = {41 (2025)}, journal = {International Journal of Forecasting}, number = {2}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1872-8200}, doi = {10.1016/j.ijforecast.2024.04.008}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-18793}, pages = {670 -- 688}, year = {2024}, abstract = {Election polls are frequently employed to reflect voter sentiment with respect to a particular election (or fixed event). Despite their widespread use as forecasts and as inputs for predictive algorithms, there is substantial uncertainty regarding their efficiency. This uncertainty is amplified by judgment in the form of pollsters applying unpublished weighting schemes to ensure the representativeness of the sampled voters for the underlying population. Efficient forecasting systems incorporate past information instantly, which renders a given fixed event unpredictable based on past information. This results in all sequential adjustments of the fixed-event forecasts across adjacent time periods (or forecast revisions) being martingale differences. This paper illustrates the theoretical conditions related to weak efficiency of fixed-event forecasting systems based on traditional least squares loss and asymmetrically weighted least absolute deviations (or quantile) loss. Weak efficiency of poll-based multi-period forecasting systems is investigated for all German federal state elections since the year 2000. The inefficiency of almost all considered forecasting systems is documented, and alternative explanations for the findings are discussed.}, language = {en} }

@article{LechldeMeerFuermann2024, author = {Lechl, Michael and de Meer, Hermann and F{\"u}rmann, Tim}, title = {A stochastic flexibility calculus for uncertainty-aware energy flexibility management}, series = {Applied Energy}, volume = {379 (2025)}, journal = {Applied Energy}, doi = {10.1016/j.apenergy.2024.124907}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19233}, pages = {16 pages}, year = {2024}, abstract = {The increasing share of volatile renewables in power systems requires more reserves to balance forecast errors in renewable generation and power fluctuations. At the same time, common reserves such as gas-fired power plants are being phased out, impeding the procurement of sufficient reserves. Alternative reserves, particularly on the demand side, such as battery storage systems, also exhibit some degree of freedom to deviate from their scheduled operating point and supply or consume more or less power, thus providing a flexibility potential. However, demand-side flexibility potentials are generally subject to uncertainties, and so is the generation of volatile renewables. The challenge is to incorporate the uncertainties on both sides in order to procure sufficient (uncertain) flexibility potential in advance. Considering uncertainty is important to avoid additional, drastic measures in real time to balance generation and demand, such as curtailing renewable generation or load shedding.
This work presents a stochastic flexibility calculus that provides an indicator for computing the risk of insufficient flexibility potentials or, conversely, guarantees for sufficient flexibility potentials. Thus, the stochastic flexibility calculus contributes to overcoming the challenge of procuring sufficient flexibility potentials in renewable-based systems. An evaluation based on real data is performed using the example of a renewable energy community consisting of households equipped with photovoltaic power plants and battery storage systems. The newly introduced stochastic flexibility calculus computes the number of households that must operate their battery storage systems flexibly to balance forecast errors locally. The results show that the forecast method significantly influences this number. Some numerical results appear counterintuitive, as too many flexibility-friendly households can negatively impact the aggregated household flexibility potential.}, language = {en} }

@article{GoerigkKurtz2024, author = {Goerigk, Marc and Kurtz, Jannis}, title = {Data-driven prediction of relevant scenarios for robust combinatorial optimization}, series = {Computers \& Operations Research}, volume = {174 (2025)}, journal = {Computers \& Operations Research}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1873-765X}, doi = {10.1016/j.cor.2024.106886}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19207}, pages = {14 pages}, year = {2024}, abstract = {We study iterative constraint and variable generation methods for (two-stage) robust combinatorial optimization problems with discrete uncertainty. The goal of this work is to find a set of starting scenarios that provides strong lower bounds early in the process. To this end, we define the Relevant Scenario Recognition Problem (RSRP), which asks for the choice of scenarios that maximizes the corresponding objective value. We show for classical and two-stage robust optimization that this problem can be solved in polynomial time if the number of selected scenarios is constant, and that it is NP-hard if this number is part of the input. Furthermore, we derive a linear mixed-integer programming formulation for the problem in both cases. Since solving the RSRP exactly is not possible in reasonable time, we propose a machine-learning-based heuristic to determine a good set of starting scenarios. To this end, we design a set of dimension-independent features and train a Random Forest Classifier on already solved small-dimensional instances of the problem. Our experiments show that our method is able to improve the solution process even for instances larger than those contained in the training set, and that predicting even a small number of good starting scenarios can considerably reduce the optimality gap.
Additionally, our method provides a feature importance score, which can give new insights into the role of scenario properties in robust optimization.}, language = {en} }

@article{ŁatuszyńskiRudolf2024, author = {Łatuszyński, Krzysztof and Rudolf, Daniel}, title = {Convergence of hybrid slice sampling via spectral gap}, series = {Advances in Applied Probability}, volume = {56 (2024)}, journal = {Advances in Applied Probability}, number = {4}, publisher = {Cambridge University Press}, address = {Cambridge}, issn = {1475-6064}, doi = {10.1017/apr.2024.16}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19264}, pages = {1440 -- 1466}, year = {2024}, abstract = {It is known that the simple slice sampler has robust convergence properties; however, the class of problems where it can be implemented is limited. In contrast, we consider hybrid slice samplers, which are easily implementable and where another Markov chain approximately samples the uniform distribution on each slice. Under appropriate assumptions on the Markov chain on the slice, we give a lower bound and an upper bound on the spectral gap of the hybrid slice sampler in terms of the spectral gap of the simple slice sampler. An immediate consequence of this is that the spectral gap and geometric ergodicity of the hybrid slice sampler can be concluded from the spectral gap and geometric ergodicity of the simple version, which is very well understood. These results indicate that robustness properties of the simple slice sampler are inherited by (appropriately designed) easily implementable hybrid versions. We apply the developed theory and analyze a number of specific algorithms, such as stepping-out shrinkage slice sampling, hit-and-run slice sampling on a class of multivariate targets, and an easily implementable combination of both procedures on multidimensional bimodal densities.}, language = {en} }

@phdthesis{Danner2025, author = {Danner, Julian}, title = {SAT Solving Using XOR-OR-AND Normal Forms and Cryptographic Fault Attacks}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19171}, school = {Universit{\"a}t Passau}, pages = {vi, 237 pages}, year = {2025}, abstract = {The Boolean satisfiability problem (SAT) lies at the core of computational logic and has found many applications in verification, cryptography, and artificial intelligence. While conflict-driven clause learning (CDCL) SAT solvers excel on large industrial instances, they struggle with the XOR-rich instances that arise frequently in cryptanalysis, due to the inefficiency of CNF encodings of linear constraints. Conversely, algebraic approaches can work with linear XOR constraints naturally but fail to scale to relevant sizes. Bridging these complementary paradigms with a focus on cryptographic problems is at the heart of this thesis. On the one hand, this dissertation advances SAT solving by introducing the XOR-OR-AND normal form (XNF) as a generalization of the conjunctive normal form (CNF), where literals are replaced by XOR chains of literals. This allows for a native representation of XOR constraints. We generalize the CDCL architecture to the richer language of XNFs. The underlying reasoning is based on the proof system SRES, which is shown to be exponentially stronger than classical resolution. An implementation demonstrates competitive performance and often surpasses state-of-the-art algebraic and logic solvers on random and cryptographic benchmarks.
Furthermore, we prove that every XNF formula can be converted in polynomial time to a formula in 2-XNF, enabling a graph-based approach similar to 2-SAT. Building on this, we propose advanced inprocessing and preprocessing techniques and construct a simple DPLL-based solving framework. Our implementation, 2-Xornado, outperforms modern algebraic and logic solving approaches on many random and some structured cryptographic problems. On the other hand, we apply combined algebraic and logical techniques to the cryptanalysis of stream ciphers. We introduce a formal guess-and-determine (GD) framework using a logical abstraction of the information flow in the internal state. From an algebraic point of view, we can then find optimal GD attacks utilizing a Gr{\"o}bner basis. As a case study, we apply this method to aid in the construction of novel fault attacks on the ciphers KCipher-2 and Enocoro-128v2. Using ad hoc methods combining algebraic and logical approaches, we show that both ciphers are vulnerable to active side-channel attacks under rather weak fault models.}, language = {en} }

@phdthesis{Ellinger2025, author = {Ellinger, Simon}, title = {On optimal error rates for strong approximation of stochastic differential equations with irregular drift coefficients}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-19634}, school = {Universit{\"a}t Passau}, pages = {179 pages in various paginations}, year = {2025}, abstract = {In this dissertation, we study strong approximation of stochastic differential equations (SDEs) with irregular drift coefficients, at the final time point or globally in time, by methods that use only finitely many evaluations of the driving Brownian motion. We show the optimality of well-known methods, such as the Euler-Maruyama scheme or a transformed Milstein scheme, for classes of piecewise Lipschitz continuous, H{\"o}lder continuous, and Sobolev regular drift coefficients. To do this, we derive the optimal error rates for the different classes of irregular drift coefficients. Furthermore, we show that the solution of an SDE with a piecewise H{\"o}lder continuous drift coefficient has a regular local density, which is used in the proofs of the lower bounds.}, language = {en} }

@phdthesis{Khosravi2026, author = {Khosravi, Mohammad}, title = {Hard Instances, Improved Algorithms and New Interdiction Models for Robust Optimization}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:739-opus4-20399}, school = {Universit{\"a}t Passau}, pages = {vii, 197 pages}, year = {2026}, abstract = {Robust combinatorial optimization seeks solutions that remain effective across all possible realizations of an uncertainty set, making the choice of this set a crucial factor in both the complexity and the practical applicability of robust models. A key challenge in this field is striking a balance between computational tractability and solution quality, particularly when dealing with large uncertainty sets. This dissertation advances the field of robust optimization by addressing three central themes: (i) methods for generating hard instances and establishing a benchmark library, (ii) high-quality exact solution methods and approximation algorithms, and (iii) the modeling of uncertainty sets and their impact on problem complexity.

The absence of a benchmark library for robust optimization problems makes it difficult to conduct fair and effective comparisons of different solution methods.
As a result, researchers often rely on randomly generated instances, which may hinder meaningful evaluations. To address this issue, this work develops optimization-based and heuristic methods for generating challenging instances of robust problems. Additionally, to facilitate more consistent and insightful comparisons of solution algorithms with minimal effort, we introduce a standardized benchmark library for use by the research community.

To tackle the computational challenges posed by large uncertainty sets, this dissertation proposes scenario reduction techniques specifically designed for robust optimization. These methods aim to reduce the size of the uncertainty set while preserving the objective value as accurately as possible. Unlike traditional clustering approaches, this formulation treats scenario reduction as an optimization problem independent of the underlying decision-making model, enabling structured reductions with theoretical performance guarantees. Experimental results demonstrate that this approach produces solutions of comparable or superior quality to those obtained through general-purpose clustering techniques.

Building on this framework, we further refine scenario reduction by incorporating information about the structure of feasible solutions. While previous reduction methods focused exclusively on the uncertainty set, we show that integrating knowledge of feasible solutions leads to improved uncertainty sets and more accurate robust models. Through a combination of theoretical analysis and computational experiments, we establish the effectiveness of this approach in enhancing both tractability and solution quality in robust combinatorial optimization.

Finally, we introduce a novel variant of discrete budgeted uncertainty for cardinality-based constraints or objectives, incorporating a weight vector into the budget constraint. Our theoretical analysis reveals that while the adversarial problem can be solved in linear time, the robust problem becomes NP-hard and non-approximable. Nonetheless, we propose and evaluate alternative modeling approaches that demonstrate promising scalability in practice.

This dissertation contributes to robust optimization by offering new perspectives on uncertainty modeling, algorithmic techniques for scenario reduction, and complexity analyses of key robust problems. The proposed methods provide both theoretical guarantees and practical advancements, paving the way for more efficient and scalable robust optimization models.}, language = {en} }