@inproceedings{CriadoMartinezRubioPokutta2022,
  author = {Criado, Francisco and Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title = {Fast Algorithms for Packing Proportional Fairness and its Dual},
  volume = {36},
  booktitle = {Proceedings of the Conference on Neural Information Processing Systems},
  year = {2022},
  abstract = {The proportional fair resource allocation problem is a major problem studied in flow control of networks, operations research, and economic theory, where it has found numerous applications. This problem, defined as the constrained maximization of $\sum_i \log x_i$, is known as the packing proportional fairness problem when the feasible set is defined by positive linear constraints and $x \in \mathbb{R}^n_{\geq 0}$. In this work, we present a distributed accelerated first-order method for this problem which improves upon previous approaches. We also design an algorithm for the optimization of its dual problem. Both algorithms are width-independent.},
  language = {en}
}

@inproceedings{HendrychBesanconMartinezRubioetal.2025,
  author = {Hendrych, Deborah and Besan{\c{c}}on, Mathieu and Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title = {Secant Line Search for Frank-Wolfe Algorithms},
  volume = {267},
  booktitle = {Proceedings of the 42nd International Conference on Machine Learning},
  arxiv = {http://arxiv.org/abs/2501.18775},
  pages = {23005--23029},
  year = {2025},
  language = {en}
}

@inproceedings{RouxMartinezRubioPokutta2025,
  author = {Roux, Christophe and Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title = {Implicit Riemannian Optimism with Applications to Min-Max Problems},
  volume = {267},
  booktitle = {Proceedings of the 42nd International Conference on Machine Learning},
  arxiv = {http://arxiv.org/abs/2501.18381},
  pages = {52139--52172},
  year = {2025},
  language = {en}
}

@inproceedings{MartinezRubioRouxPokutta2024,
  author = {Mart{\'i}nez-Rubio, David and Roux, Christophe and Pokutta, Sebastian},
  title = {Convergence and Trade-Offs in Riemannian Gradient Descent and Riemannian Proximal Point},
  volume = {235},
  booktitle = {Proceedings of the 41st International Conference on Machine Learning},
  pages = {34920--34948},
  year = {2024},
  abstract = {In this work, we analyze two of the most fundamental algorithms in geodesically convex optimization: Riemannian gradient descent and (possibly inexact) Riemannian proximal point. We quantify their rates of convergence and produce different variants with several trade-offs. Crucially, we show that the iterates naturally stay in a ball around an optimizer, of radius depending on the initial distance and, in some cases, on the curvature. Previous works simply assumed bounded iterates, resulting in rates that were not fully quantified. We also provide an implementable inexact proximal point algorithm and prove several new useful properties of Riemannian proximal methods: they work when positive curvature is present, the proximal operator does not move points away from any optimizer, and we quantify the smoothness of its induced Moreau envelope.
Further, we explore beyond our theory with empirical tests.},
  language = {en}
}

@inproceedings{MartinezRubioRouxCriscitielloetal.2025,
  author = {Mart{\'i}nez-Rubio, David and Roux, Christophe and Criscitiello, Christopher and Pokutta, Sebastian},
  title = {Accelerated Methods for Riemannian Min-Max Optimization Ensuring Bounded Geometric Penalties},
  volume = {258},
  booktitle = {Proceedings of the 28th International Conference on Artificial Intelligence and Statistics},
  pages = {280--288},
  year = {2025},
  language = {en}
}

@inproceedings{MartinezRubioWirthPokutta2023,
  author = {Mart{\'i}nez-Rubio, David and Wirth, Elias and Pokutta, Sebastian},
  title = {Accelerated and Sparse Algorithms for Approximate Personalized PageRank and Beyond},
  volume = {195},
  booktitle = {Proceedings of Thirty Sixth Conference on Learning Theory},
  pages = {1--35},
  year = {2023},
  abstract = {It has recently been shown that ISTA, an unaccelerated optimization method, presents sparse updates for the $\ell_1$-regularized undirected personalized PageRank problem (Fountoulakis et al., 2019), leading to cheap iteration complexity and providing the same guarantees as the approximate personalized PageRank algorithm (APPR) (Andersen et al., 2006). In this work, we design an accelerated optimization algorithm for this problem that also performs sparse updates, providing an affirmative answer to the COLT 2022 open question of Fountoulakis and Yang (2022). Acceleration provides a reduced dependence on the condition number, while the dependence on the sparsity in our updates differs from the ISTA approach. Further, we design another algorithm that uses conjugate directions to achieve an exact solution while exploiting sparsity. Both algorithms lead to faster convergence for certain parameter regimes. Our findings apply beyond PageRank and work for any quadratic objective whose Hessian is a positive-definite M-matrix.},
  language = {en}
}

@inproceedings{MartinezRubioPokutta2023,
  author = {Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title = {Accelerated Riemannian Optimization: Handling Constraints with a Prox to Bound Geometric Penalties},
  volume = {195},
  booktitle = {Proceedings of Thirty Sixth Conference on Learning Theory},
  pages = {359--393},
  year = {2023},
  abstract = {We propose a globally-accelerated, first-order method for the optimization of smooth and (strongly or not) geodesically-convex functions in a wide class of Hadamard manifolds. We achieve the same convergence rates as Nesterov's accelerated gradient descent, up to a multiplicative geometric penalty and log factors. Crucially, we can enforce our method to stay within a compact set we define. Prior fully accelerated works \emph{resort to assuming} that the iterates of their algorithms stay in some pre-specified compact set, except for two previous methods of limited applicability. For our manifolds, this solves the open question in (Kim and Yang, 2022) about obtaining global general acceleration without iterates assumptively staying in the feasible set. In our solution, we design an accelerated Riemannian inexact proximal point algorithm, which is a result that was unknown even with exact access to the proximal operator, and is of independent interest.
For smooth functions, we show that we can implement the prox step inexactly with first-order methods in Riemannian balls of a certain diameter, which is enough for global accelerated optimization.},
  language = {en}
}

@inproceedings{CriscitielloMartinezRubioBoumal2023,
  author = {Criscitiello, Christopher and Mart{\'i}nez-Rubio, David and Boumal, Nicolas},
  title = {Open Problem: Polynomial linearly-convergent method for g-convex optimization?},
  volume = {195},
  booktitle = {Proceedings of Thirty Sixth Conference on Learning Theory},
  pages = {5950--5956},
  year = {2023},
  language = {en}
}

@inproceedings{MartinezRubioPokutta2022,
  author = {Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title = {Accelerated Riemannian Optimization: Handling Constraints with a Prox to Bound Geometric Penalties},
  booktitle = {Proceedings of Optimization for Machine Learning (NeurIPS Workshop OPT 2022)},
  year = {2022},
  language = {en}
}

@inproceedings{MartinezRubioRouxCriscitielloetal.2023,
  author = {Mart{\'i}nez-Rubio, David and Roux, Christophe and Criscitiello, Christopher and Pokutta, Sebastian},
  title = {Accelerated Riemannian Min-Max Optimization Ensuring Bounded Geometric Penalties},
  booktitle = {Proceedings of Optimization for Machine Learning (NeurIPS Workshop OPT 2023)},
  year = {2023},
  language = {en}
}