@unpublished{AignerBaermannBraunetal.2023,
  author = {Aigner, Kevin-Martin and B{\"a}rmann, Andreas and Braun, Kristin and Liers, Frauke and Pokutta, Sebastian and Schneider, Oskar and Sharma, Kartikey and Tschuppik, Sebastian},
  title = {Data-driven Distributionally Robust Optimization over Time},
  year = {2023},
  abstract = {Stochastic Optimization (SO) is a classical approach to optimization under uncertainty that typically requires knowledge of the probability distribution of the uncertain parameters. As this distribution is often unknown, Distributionally Robust Optimization (DRO) provides a strong alternative that determines the best guaranteed solution over a set of distributions (the ambiguity set). In this work, we present an approach for DRO over time that uses online learning and scenario observations arriving as a data stream to learn more about the uncertainty. Our robust solutions adapt over time and reduce the cost of protection as the ambiguity shrinks. For various kinds of ambiguity sets, the robust solutions converge to the SO solution. Our algorithm achieves the optimization and learning goals without solving the DRO problem exactly at any step. We also provide a regret bound for the quality of the online strategy, which converges at a rate of $O(\log T / \sqrt{T})$, where $T$ is the number of iterations. Furthermore, we illustrate the effectiveness of our procedure with numerical experiments on mixed-integer optimization instances from popular benchmark libraries and give practical examples stemming from telecommunications and routing. Our algorithm solves the DRO over time problem significantly faster than standard reformulations.},
  language = {en}
}

@unpublished{KreimeierPokuttaWaltheretal.2022,
  author = {Kreimeier, Timo and Pokutta, Sebastian and Walther, Andrea and Woodstock, Zev},
  title = {On a Frank-Wolfe Approach for Abs-smooth Functions},
  year = {2022},
  abstract = {We propose an algorithm which appears to be the first bridge between the fields of conditional gradient methods and abs-smooth optimization. Our nonsmooth nonconvex problem setting is motivated by machine learning, since the broad class of abs-smooth functions includes, for instance, the squared $\ell_2$-error of a neural network with ReLU or hinge loss activation. To overcome the nonsmoothness in our problem, we propose a generalization of the traditional Frank-Wolfe gap and prove that first-order minimality is achieved when it vanishes. We derive a convergence rate for our algorithm which matches that of the smooth case. Although our algorithm necessitates the solution of a subproblem that is more challenging than in the smooth case, we provide an efficient numerical method for its partial solution, and we identify several applications where our approach fully solves the subproblem. Numerical and theoretical convergence are demonstrated, yielding several conjectures.},
  language = {en}
}

@unpublished{GoessMartinPokuttaetal,
  author = {G{\"o}ß, Adrian and Martin, Alexander and Pokutta, Sebastian and Sharma, Kartikey},
  title = {Norm-induced Cuts: Optimization with Lipschitzian Black-box Functions},
  abstract = {Optimal control problems usually involve constraints which model physical states and their possible transitions. These are represented by ordinary or partial differential equations (ODEs/PDEs), which add an infinite-dimensional component to the problem. In the recent literature, physics-informed neural networks have emerged as one method to simulate such ODEs/PDEs.
Typically, neural networks are highly non-linear, which makes their addition to optimization problems challenging. Hence, we leverage the Lipschitz property they often possess on a compact domain. The respective Lipschitz constants have to be computed only once and are accessible thereafter. We present a method that, based on this property, iteratively adds cuts involving the violation of the constraints by the current incumbent and the Lipschitz constant. The ``shape'' of a cut depends on the norm used. We prove the correctness of the method by showing that it either returns an optimal solution when terminating or creates a sequence with optimal accumulation points. This is complemented by a discussion of termination in the infeasible case, as well as an analysis of the problem complexity. For the analysis, we show that the lower and upper iteration bounds asymptotically coincide when the relative approximation error goes to zero. Finally, we visualize the method on a small example based on a two-dimensional non-convex optimization problem and use a further example to stress the necessity of a globally optimal oracle for the sub-problems.},
  language = {en}
}