@article{VuHanSunkaraBermudezSchettinoetal.2025,
  author = {Vu-Han, Tu-Lan and Sunkara, Vikram and Bermudez-Schettino, Rodrigo and Schwechten, Jakob and Runge, Robin and Perka, Carsten and Winkler, Tobias and Pokutta, Sebastian and Wei{\ss}, Claudia and Pumberger, Matthias},
  title = {Feature Engineering for the Prediction of Scoliosis in 5q-Spinal Muscular Atrophy},
  volume = {16},
  journal = {Journal of Cachexia, Sarcopenia and Muscle},
  number = {1},
  doi = {10.1002/jcsm.13599},
  pages = {e13599},
  year = {2025},
  language = {en}
}

@inproceedings{TroppensBesanconWilkenetal.2025,
  author = {Troppens, Hannah and Besan{\c{c}}on, Mathieu and Wilken, St. Elmo and Pokutta, Sebastian},
  title = {Mixed-Integer Optimization for Loopless Flux Distributions in Metabolic Networks},
  volume = {338},
  booktitle = {23rd International Symposium on Experimental Algorithms (SEA 2025)},
  publisher = {Schloss Dagstuhl -- Leibniz-Zentrum f{\"u}r Informatik},
  doi = {10.4230/LIPIcs.SEA.2025.26},
  pages = {26:1 -- 26:18},
  year = {2025},
  language = {en}
}

@inproceedings{GluchTuranNagarajanetal.2025,
  author = {G{\l}uch, Grzegorz and Turan, Berkant and Nagarajan, Sai Ganesh and Pokutta, Sebastian},
  title = {The Good, the Bad and the Ugly: Watermarks, Transferable Attacks and Adversarial Defenses},
  booktitle = {1st Workshop on GenAI Watermarking, co-located with ICLR 2025},
  year = {2025},
  language = {en}
}

@inproceedings{BesanconPokuttaWirth2025,
  author = {Besan{\c{c}}on, Mathieu and Pokutta, Sebastian and Wirth, Elias},
  title = {The Pivoting Framework: Frank-Wolfe Algorithms with Active Set Size Control},
  volume = {258},
  booktitle = {Proceedings of The 28th International Conference on Artificial Intelligence and Statistics},
  pages = {271 -- 279},
  year = {2025},
  language = {en}
}

@inproceedings{MartinezRubioRouxCriscitielloetal.2025,
  author = {Mart{\'i}nez-Rubio, David and Roux, Christophe and Criscitiello, Christopher and Pokutta, Sebastian},
  title = {Accelerated Methods for Riemannian Min-Max Optimization Ensuring Bounded Geometric Penalties},
  volume = {258},
  booktitle = {Proceedings of The 28th International Conference on Artificial Intelligence and Statistics},
  pages = {280 -- 288},
  year = {2025},
  language = {en}
}

@inproceedings{SadikuWagnerPokutta2025,
  author = {Sadiku, Shpresim and Wagner, Moritz and Pokutta, Sebastian},
  title = {GSE: Group-wise Sparse and Explainable Adversarial Attacks},
  booktitle = {13th International Conference on Learning Representations (ICLR 2025)},
  arxiv = {http://arxiv.org/abs/2311.17434},
  year = {2025},
  language = {en}
}

@article{WirthPenaPokutta2025,
  author = {Wirth, Elias and Pe{\~n}a, Javier and Pokutta, Sebastian},
  title = {Correction: Accelerated Affine-Invariant Convergence Rates of the Frank-Wolfe Algorithm with Open-Loop Step-Sizes},
  volume = {214},
  journal = {Mathematical Programming},
  doi = {10.1007/s10107-025-02214-3},
  pages = {941 -- 942},
  year = {2025},
  language = {en}
}

@inproceedings{RouxZimmerPokutta2025,
  author = {Roux, Christophe and Zimmer, Max and Pokutta, Sebastian},
  title = {On the Byzantine-Resilience of Distillation-Based Federated Learning},
  booktitle = {13th International Conference on Learning Representations (ICLR 2025)},
  arxiv = {http://arxiv.org/abs/2402.12265},
  year = {2025},
  abstract = {Federated Learning (FL) algorithms using Knowledge Distillation (KD) have received increasing attention due to their favorable properties with respect to privacy, non-i.i.d. data and communication cost.
These methods depart from transmitting model parameters and instead communicate information about a learning task by sharing predictions on a public dataset. In this work, we study the performance of such approaches in the Byzantine setting, where a subset of the clients act in an adversarial manner aiming to disrupt the learning process. We show that KD-based FL algorithms are remarkably resilient and analyze how Byzantine clients can influence the learning process. Based on these insights, we introduce two new Byzantine attacks and demonstrate their ability to break existing Byzantine-resilient methods. Additionally, we propose a novel defense method which enhances the Byzantine resilience of KD-based FL algorithms. Finally, we provide a general framework to obfuscate attacks, making them significantly harder to detect, thereby improving their effectiveness.},
  language = {en}
}

@inproceedings{SadikuWagnerNagarajanetal.2025,
  author = {Sadiku, Shpresim and Wagner, Moritz and Nagarajan, Sai Ganesh and Pokutta, Sebastian},
  title = {S-CFE: Simple Counterfactual Explanations},
  volume = {258},
  booktitle = {Proceedings of The 28th International Conference on Artificial Intelligence and Statistics},
  pages = {172 -- 180},
  year = {2025},
  language = {en}
}

@inproceedings{MartinezRubioWirthPokutta2023,
  author = {Mart{\'i}nez-Rubio, David and Wirth, Elias and Pokutta, Sebastian},
  title = {Accelerated and Sparse Algorithms for Approximate Personalized PageRank and Beyond},
  volume = {195},
  booktitle = {Proceedings of Machine Learning Research},
  pages = {1 -- 35},
  year = {2023},
  abstract = {It has recently been shown that ISTA, an unaccelerated optimization method, presents sparse updates for the ℓ1-regularized undirected personalized PageRank problem (Fountoulakis et al., 2019), leading to cheap iteration complexity and providing the same guarantees as the approximate personalized PageRank algorithm (APPR) (Andersen et al., 2006). In this work, we design an accelerated optimization algorithm for this problem that also performs sparse updates, providing an affirmative answer to the COLT 2022 open question of Fountoulakis and Yang (2022). Acceleration provides a reduced dependence on the condition number, while the dependence on the sparsity in our updates differs from the ISTA approach. Further, we design another algorithm by using conjugate directions to achieve an exact solution while exploiting sparsity. Both algorithms lead to faster convergence for certain parameter regimes.
Our findings apply beyond PageRank and work for any quadratic objective whose Hessian is a positive-definite M-matrix.},
  language = {en}
}

@article{CombettesPokutta2023,
  author = {Combettes, Cyrille and Pokutta, Sebastian},
  title = {Revisiting the Approximate Carath{\'e}odory Problem via the Frank-Wolfe Algorithm},
  volume = {197},
  journal = {Mathematical Programming},
  doi = {10.1007/s10107-021-01735-x},
  pages = {191 -- 214},
  year = {2023},
  language = {en}
}

@incollection{ZimmerSpiegelPokutta2025,
  author = {Zimmer, Max and Spiegel, Christoph and Pokutta, Sebastian},
  title = {Compression-Aware Training of Neural Networks Using Frank-Wolfe},
  booktitle = {Mathematical Optimization for Machine Learning: Proceedings of the MATH+ Thematic Einstein Semester 2023},
  editor = {Fackeldey, K.},
  publisher = {De Gruyter},
  doi = {10.1515/9783111376776-010},
  pages = {137 -- 168},
  year = {2025},
  language = {en}
}

@article{BaermannMartinPokuttaetal.2018,
  author = {B{\"a}rmann, Andreas and Martin, Alexander and Pokutta, Sebastian and Schneider, Oskar},
  title = {An Online-Learning Approach to Inverse Optimization},
  year = {2018},
  language = {en}
}

@inproceedings{CardereraDiakonikolasLinetal.2021,
  author = {Carderera, Alejandro and Diakonikolas, Jelena and Lin, Cheuk Yin and Pokutta, Sebastian},
  title = {Parameter-free Locally Accelerated Conditional Gradients},
  booktitle = {Proceedings of ICML 2021},
  year = {2021},
  abstract = {Projection-free conditional gradient (CG) methods are the algorithms of choice for constrained optimization setups in which projections are often computationally prohibitive but linear optimization over the constraint set remains computationally feasible. Unlike in projection-based methods, globally accelerated convergence rates are in general unattainable for CG. However, a very recent work on Locally accelerated CG (LaCG) has demonstrated that local acceleration for CG is possible for many settings of interest. The main downside of LaCG is that it requires knowledge of the smoothness and strong convexity parameters of the objective function. We remove this limitation by introducing a novel Parameter-Free Locally accelerated CG (PF-LaCG) algorithm, for which we provide rigorous convergence guarantees. Our theoretical results are complemented by numerical experiments, which demonstrate local acceleration and showcase the practical improvements of PF-LaCG over non-accelerated algorithms, both in terms of iteration count and wall-clock time.},
  language = {en}
}

@article{BienstockMunozPokutta2018,
  author = {Bienstock, Daniel and Mu{\~n}oz, Gonzalo and Pokutta, Sebastian},
  title = {Principled Deep Neural Network Training through Linear Programming},
  year = {2018},
  abstract = {Deep Learning has received significant attention due to its impressive performance in many state-of-the-art learning tasks. Unfortunately, while very powerful, Deep Learning is not well understood theoretically, and in particular, results on the complexity of training deep neural networks have been obtained only recently.
In this work, we show that large classes of deep neural networks with various architectures (e.g., DNNs, CNNs, Binary Neural Networks, and ResNets), activation functions (e.g., ReLUs and leaky ReLUs), and loss functions (e.g., Hinge loss, Euclidean loss, etc.) can be trained to near optimality with desired target accuracy using linear programming, in time that is exponential in the input data and parameter space dimension and polynomial in the size of the data set; improvements of the dependence on the input dimension are known to be unlikely assuming P≠NP, and improving the dependence on the parameter space dimension remains open. In particular, we obtain polynomial-time algorithms for training for a given fixed network architecture. Our work applies more broadly to empirical risk minimization problems, which allows us to generalize various previous results and obtain new complexity results for previously unstudied architectures in the proper learning setting.},
  language = {en}
}

@inproceedings{KerdreuxdAspremontPokutta2020,
  author = {Kerdreux, Thomas and d'Aspremont, Alexandre and Pokutta, Sebastian},
  title = {Projection-Free Optimization on Uniformly Convex Sets},
  booktitle = {To Appear in Proceedings of AISTATS},
  year = {2020},
  language = {en}
}

@article{RouxPokuttaWirthetal.2021,
  author = {Roux, Christophe and Pokutta, Sebastian and Wirth, Elias and Kerdreux, Thomas},
  title = {Efficient Online-Bandit Strategies for Minimax Learning Problems},
  year = {2021},
  abstract = {Several learning problems involve solving min-max problems, e.g., empirical distributional robust learning [Namkoong and Duchi, 2016, Curi et al., 2020] or learning with non-standard aggregated losses [Shalev-Shwartz and Wexler, 2016, Fan et al., 2017]. More specifically, these problems are convex-linear problems where the minimization is carried out over the model parameters w ∈ W and the maximization over the empirical distribution p ∈ K of the training set indexes, where K is the simplex or a subset of it. To design efficient methods, we let an online learning algorithm play against a (combinatorial) bandit algorithm. We argue that the efficiency of such approaches critically depends on the structure of K and propose two properties of K that facilitate designing efficient algorithms. We focus on a specific family of sets S_{n,k} encompassing various learning applications and provide high-probability convergence guarantees to the minimax values.},
  language = {en}
}

@article{KerdreuxRouxdAspremontetal.2021,
  author = {Kerdreux, Thomas and Roux, Christophe and d'Aspremont, Alexandre and Pokutta, Sebastian},
  title = {Linear Bandits on Uniformly Convex Sets},
  volume = {22},
  journal = {Journal of Machine Learning Research},
  number = {284},
  pages = {1 -- 23},
  year = {2021},
  abstract = {Linear bandit algorithms yield Õ(n√T) pseudo-regret bounds on compact convex action sets K ⊂ R^n, and two types of structural assumptions lead to better pseudo-regret bounds. When K is the simplex or an ℓp ball with p ∈ ]1,2], there exist bandit algorithms with Õ(√n√T) pseudo-regret bounds. Here, we derive bandit algorithms for some strongly convex sets beyond ℓp balls that enjoy pseudo-regret bounds of Õ(√n√T), which answers an open question from [BCB12, \S5.5.]. Interestingly, when the action set is uniformly convex but not necessarily strongly convex, we obtain pseudo-regret bounds with a dimension dependency smaller than O(√n).
However, this comes at the expense of asymptotic rates in T varying between O(√T) and O(T).},
  language = {en}
}

@article{CardereraPokuttaSchuetteetal.2021,
  author = {Carderera, Alejandro and Pokutta, Sebastian and Sch{\"u}tte, Christof and Weiser, Martin},
  title = {An Efficient First-Order Conditional Gradient Algorithm in Data-Driven Sparse Identification of Nonlinear Dynamics to Solve Sparse Recovery Problems under Noise},
  journal = {Journal of Computational and Applied Mathematics},
  arxiv = {http://arxiv.org/abs/2101.02630},
  year = {2021},
  abstract = {Governing equations are essential to the study of nonlinear dynamics, often enabling the prediction of previously unseen behaviors as well as the inclusion into control strategies. The discovery of governing equations from data thus has the potential to transform data-rich fields where well-established dynamical models remain unknown. This work contributes to the recent trend of data-driven sparse identification of nonlinear dynamics, which seeks the best sparse fit to observational data within a large library of potential nonlinear models. We propose an efficient first-order Conditional Gradient algorithm for solving the underlying optimization problem. In comparison to the most prominent alternative algorithms, the new algorithm shows significantly improved performance on several essential issues such as sparsity induction, structure preservation, noise robustness, and sample efficiency. We demonstrate these advantages on several dynamical systems from the fields of synchronization, particle dynamics, and enzyme chemistry.},
  language = {en}
}

@article{PokuttaSpiegelZimmer2020,
  author = {Pokutta, Sebastian and Spiegel, Christoph and Zimmer, Max},
  title = {Deep Neural Network Training with Frank-Wolfe},
  arxiv = {http://arxiv.org/abs/2010.07243},
  year = {2020},
  abstract = {This paper studies the empirical efficacy and benefits of using projection-free first-order methods in the form of Conditional Gradients, a.k.a. Frank-Wolfe methods, for training Neural Networks with constrained parameters. We draw comparisons both to current state-of-the-art stochastic Gradient Descent methods as well as across different variants of stochastic Conditional Gradients. In particular, we show the general feasibility of training Neural Networks whose parameters are constrained by a convex feasible region using Frank-Wolfe algorithms and compare different stochastic variants. We then show that, by choosing an appropriate region, one can achieve performance exceeding that of unconstrained stochastic Gradient Descent and matching state-of-the-art results relying on L2-regularization. Lastly, we demonstrate that, besides impacting performance, the particular choice of constraints can have a drastic impact on the learned representations.},
  language = {en}
}

@article{CombettesSpiegelPokutta2020,
  author = {Combettes, Cyrille W. and Spiegel, Christoph and Pokutta, Sebastian},
  title = {Projection-Free Adaptive Gradients for Large-Scale Optimization},
  arxiv = {http://arxiv.org/abs/2009.14114},
  year = {2020},
  abstract = {The complexity in large-scale optimization can lie in both handling the objective function and handling the constraint set. In this respect, stochastic Frank-Wolfe algorithms occupy a unique position as they alleviate both computational burdens, by querying only approximate first-order information from the objective and by maintaining feasibility of the iterates without using projections.
In this paper, we improve the quality of their first-order information by blending in adaptive gradients. We derive convergence rates and demonstrate the computational advantage of our method over the state-of-the-art stochastic Frank-Wolfe algorithms on both convex and nonconvex objectives. The experiments further show that our method can improve the performance of adaptive gradient algorithms for constrained optimization.},
  language = {en}
}

@inproceedings{PfetschPokutta2020,
  author = {Pfetsch, Marc and Pokutta, Sebastian},
  title = {IPBoost -- Non-Convex Boosting via Integer Programming},
  booktitle = {Proceedings of ICML},
  arxiv = {http://arxiv.org/abs/2002.04679},
  year = {2020},
  language = {en}
}

@inproceedings{PokuttaSinghTorrico2020,
  author = {Pokutta, Sebastian and Singh, Mohit and Torrico, Alfredo},
  title = {On the Unreasonable Effectiveness of the Greedy Algorithm: Greedy Adapts to Sharpness},
  booktitle = {Proceedings of ICML},
  arxiv = {http://arxiv.org/abs/2002.04063},
  year = {2020},
  language = {en}
}

@inproceedings{DiakonikolasCardereraPokutta2020,
  author = {Diakonikolas, Jelena and Carderera, Alejandro and Pokutta, Sebastian},
  title = {Locally Accelerated Conditional Gradients},
  booktitle = {Proceedings of AISTATS},
  arxiv = {http://arxiv.org/abs/1906.07867},
  year = {2020},
  language = {en}
}

@article{FaenzaMunozPokutta2020,
  author = {Faenza, Yuri and Mu{\~n}oz, Gonzalo and Pokutta, Sebastian},
  title = {New Limits of Treewidth-Based Tractability in Optimization},
  volume = {191},
  journal = {Mathematical Programming},
  arxiv = {http://arxiv.org/abs/1807.02551},
  doi = {10.1007/s10107-020-01563-5},
  pages = {559 -- 594},
  year = {2020},
  language = {en}
}