@inproceedings{CriadoMartinezRubioPokutta,
  author    = {Criado, Francisco and Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title     = {Fast Algorithms for Packing Proportional Fairness and its Dual},
  series    = {Proceedings of the Conference on Neural Information Processing Systems},
  volume    = {36},
  booktitle = {Proceedings of the Conference on Neural Information Processing Systems},
  abstract  = {The proportional fair resource allocation problem is a major problem studied in flow control of networks, operations research, and economic theory, where it has found numerous applications. This problem, defined as the constrained maximization of $\sum_i \log x_i$, is known as the packing proportional fairness problem when the feasible set is defined by positive linear constraints and $x \in \mathbb{R}^n_{\geq 0}$. In this work, we present a distributed accelerated first-order method for this problem which improves upon previous approaches. We also design an algorithm for the optimization of its dual problem. Both algorithms are width-independent.},
  language  = {en}
}

@inproceedings{MartinezRubioWirthPokutta,
  author    = {Mart{\'i}nez-Rubio, David and Wirth, Elias and Pokutta, Sebastian},
  title     = {Accelerated and Sparse Algorithms for Approximate Personalized PageRank and Beyond},
  series    = {Proceedings of Machine Learning Research},
  volume    = {195},
  booktitle = {Proceedings of Machine Learning Research},
  pages     = {1--35},
  abstract  = {It has recently been shown that ISTA, an unaccelerated optimization method, presents sparse updates for the $\ell_1$-regularized undirected personalized PageRank problem (Fountoulakis et al., 2019), leading to cheap iteration complexity and providing the same guarantees as the approximate personalized PageRank algorithm (APPR) (Andersen et al., 2006). In this work, we design an accelerated optimization algorithm for this problem that also performs sparse updates, providing an affirmative answer to the COLT 2022 open question of Fountoulakis and Yang (2022). Acceleration provides a reduced dependence on the condition number, while the dependence on the sparsity in our updates differs from that of the ISTA approach. Further, we design another algorithm that uses conjugate directions to achieve an exact solution while exploiting sparsity. Both algorithms lead to faster convergence for certain parameter regimes. Our findings apply beyond PageRank and work for any quadratic objective whose Hessian is a positive-definite $M$-matrix.},
  language  = {en}
}
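% Note on CriadoMartinezRubioPokutta: spelled out, the abstract's objective is the 1-fair
% packing program sketched below. The normalization $Ax \leq \mathbf{1}$ with a nonnegative
% constraint matrix is an assumption inferred from "positive linear constraints"; the entry
% itself does not state the matrix form.
%
%   \max_{x \in \mathbb{R}^n_{\geq 0}} \; \sum_{i=1}^n \log x_i
%   \quad \text{subject to} \quad Ax \leq \mathbf{1}, \qquad A \in \mathbb{R}^{m \times n}_{\geq 0}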
@inproceedings{MartinezRubioPokuttaCOLT,
  author    = {Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title     = {Accelerated Riemannian Optimization: Handling Constraints with a Prox to Bound Geometric Penalties},
  series    = {Proceedings of Thirty Sixth Conference on Learning Theory},
  volume    = {195},
  booktitle = {Proceedings of Thirty Sixth Conference on Learning Theory},
  pages     = {359--393},
  abstract  = {We propose a globally-accelerated, first-order method for the optimization of smooth and (strongly or not) geodesically-convex functions in a wide class of Hadamard manifolds. We achieve the same convergence rates as Nesterov's accelerated gradient descent, up to a multiplicative geometric penalty and log factors. Crucially, we can enforce our method to stay within a compact set that we define. Prior fully accelerated works \emph{resort to assuming} that the iterates of their algorithms stay in some pre-specified compact set, except for two previous methods of limited applicability. For our manifolds, this resolves the open question of Kim and Yang (2022) on obtaining global general acceleration without assuming that the iterates stay in the feasible set. In our solution, we design an accelerated Riemannian inexact proximal point algorithm, a result that was unknown even with exact access to the proximal operator and that is of independent interest. For smooth functions, we show that the prox step can be implemented inexactly with first-order methods in Riemannian balls whose diameter is sufficient for global accelerated optimization.},
  language  = {en}
}

@inproceedings{CriscitielloMartinezRubioBoumal,
  author    = {Criscitiello, Christopher and Mart{\'i}nez-Rubio, David and Boumal, Nicolas},
  title     = {Open Problem: Polynomial Linearly-Convergent Method for G-Convex Optimization?},
  series    = {Proceedings of Thirty Sixth Conference on Learning Theory},
  volume    = {195},
  booktitle = {Proceedings of Thirty Sixth Conference on Learning Theory},
  pages     = {5950--5956},
  language  = {en}
}

@inproceedings{MartinezRubioRouxCriscitielloetal,
  author    = {Mart{\'i}nez-Rubio, David and Roux, Christophe and Criscitiello, Christopher and Pokutta, Sebastian},
  title     = {Accelerated Riemannian Min-Max Optimization Ensuring Bounded Geometric Penalties},
  series    = {Proceedings of Optimization for Machine Learning (NeurIPS Workshop OPT 2023)},
  booktitle = {Proceedings of Optimization for Machine Learning (NeurIPS Workshop OPT 2023)},
  language  = {en}
}

@inproceedings{MartinezRubioPokuttaOPT2022,
  author    = {Mart{\'i}nez-Rubio, David and Pokutta, Sebastian},
  title     = {Accelerated Riemannian Optimization: Handling Constraints with a Prox to Bound Geometric Penalties},
  series    = {Proceedings of Optimization for Machine Learning (NeurIPS Workshop OPT 2022)},
  booktitle = {Proceedings of Optimization for Machine Learning (NeurIPS Workshop OPT 2022)},
  language  = {en}
}
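% Note on MartinezRubioPokuttaCOLT: the "prox step" in the abstract refers to a Riemannian
% proximal operator. A standard exact form is sketched below, with $d$ the geodesic distance
% on the Hadamard manifold $\mathcal{M}$ and $\lambda > 0$ a regularization parameter; the
% paper only requires inexact evaluations, so this is an illustrative assumption rather than
% the paper's precise subproblem.
%
%   x_{k+1} \approx \operatorname*{arg\,min}_{y \in \mathcal{M}} \left\{ f(y) + \frac{\lambda}{2} \, d(x_k, y)^2 \right\}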