@misc{Sagnol,
  author = {Sagnol, Guillaume},
  title = {Network-related problems in Optimal Experimental Design and Second Order Cone Programming},
  volume = {51},
  number = {51},
  issn = {1438-0064},
  doi = {10.2478/v10127-012-0016-x},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14942},
  pages = {161 -- 171},
  abstract = {In the past few years, several applications of optimal experimental designs have emerged to optimize the measurements in communication networks. The optimal design problems arising from this kind of application share three interesting properties: (i) measurements are only available at a small number of locations of the network; (ii) each monitor can simultaneously measure several quantities, which can be modeled by ``multiresponse experiments''; (iii) the observation matrices depend on the topology of the network. In this paper, we give an overview of these experimental design problems and recall recent results for the computation of optimal designs by Second Order Cone Programming (SOCP). New results for the network monitoring of a discrete-time process are presented. In particular, we show that the optimal design problem for the monitoring of an AR(1) process can be reduced to the standard form, and we give experimental results.},
  language = {en}
}

@misc{KlebanovSikorskiSchuetteetal.,
  author = {Klebanov, Ilja and Sikorski, Alexander and Sch{\"u}tte, Christof and R{\"o}blitz, Susanna},
  title = {Prior estimation and Bayesian inference from large cohort data sets},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-57475},
  abstract = {One of the main goals of mathematical modelling in systems biology related to medical applications is to obtain patient-specific parameterisations and model predictions. In clinical practice, however, the number of available measurements for single patients is usually limited due to time and cost restrictions. This hampers the process of making patient-specific predictions about the outcome of a treatment. On the other hand, data are often available for many patients, in particular if extensive clinical studies have been performed. Using these population data, we propose an iterative algorithm for constructing an informative prior distribution, which then serves as the basis for computing patient-specific posteriors and obtaining individual predictions. We demonstrate the performance of our method by applying it to a low-dimensional parameter estimation problem in a toy model as well as to a high-dimensional ODE model of the human menstrual cycle, which represents a typical example from systems biology modelling.},
  language = {en}
}

@misc{KlebanovSikorskiSchuetteetal.,
  author = {Klebanov, Ilja and Sikorski, Alexander and Sch{\"u}tte, Christof and R{\"o}blitz, Susanna},
  title = {Empirical Bayes Methods for Prior Estimation in Systems Medicine},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61307},
  abstract = {One of the main goals of mathematical modelling in systems medicine is to obtain patient-specific parameterizations and model predictions. In clinical practice, however, the number of available measurements for single patients is usually limited due to time and cost restrictions. This hampers the process of making patient-specific predictions about the outcome of a treatment. On the other hand, data are often available for many patients, in particular if extensive clinical studies have been performed.
  Therefore, before applying Bayes' rule separately to the data of each patient (which is typically performed using a non-informative prior), it is meaningful to use empirical Bayes methods in order to construct an informative prior from all available data. We compare the performance of four priors - a non-informative prior and priors chosen by nonparametric maximum likelihood estimation (NPMLE), by maximum penalized likelihood estimation (MPLE) and by doubly-smoothed maximum likelihood estimation (DS-MLE) - by applying them to a low-dimensional parameter estimation problem in a toy model as well as to a high-dimensional ODE model of the human menstrual cycle, which represents a typical example from systems biology modelling.},
  language = {en}
}

@misc{LieSullivanTeckentrup,
  author = {Lie, Han Cheng and Sullivan, T. J. and Teckentrup, Aretha},
  title = {Random forward models and log-likelihoods in Bayesian inverse problems},
  series = {SIAM/ASA Journal on Uncertainty Quantification},
  volume = {6},
  journal = {SIAM/ASA Journal on Uncertainty Quantification},
  number = {4},
  issn = {1438-0064},
  doi = {10.1137/18M1166523},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66324},
  pages = {1600 -- 1629},
  abstract = {We consider the use of randomised forward models and log-likelihoods within the Bayesian approach to inverse problems. Such random approximations to the exact forward model or log-likelihood arise naturally when a computationally expensive model is approximated using a cheaper stochastic surrogate, as in Gaussian process emulation (kriging), or in the field of probabilistic numerical methods. We show that the Hellinger distance between the exact and approximate Bayesian posteriors is bounded by moments of the difference between the true and approximate log-likelihoods. Example applications of these stability results are given for randomised misfit models in large data applications and the probabilistic solution of ordinary differential equations.},
  language = {en}
}

@misc{SagnolHarman,
  author = {Sagnol, Guillaume and Harman, Radoslav},
  title = {Computing exact D-optimal designs by mixed integer second order cone programming},
  issn = {1438-0064},
  doi = {10.1214/15-AOS1339},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-41932},
  abstract = {Let the design of an experiment be represented by an \$s\$-dimensional vector \$\vec{w}\$ of weights with non-negative components. Let the quality of \$\vec{w}\$ for the estimation of the parameters of the statistical model be measured by the criterion of \$D\$-optimality, defined as the \$m\$-th root of the determinant of the information matrix \$M(\vec{w})=\sum_{i=1}^s w_iA_iA_i^T\$, where \$A_i\$, \$i=1,...,s\$, are known matrices with \$m\$ rows. In the paper, we show that the criterion of \$D\$-optimality is second-order cone representable. As a result, the method of second order cone programming can be used to compute an approximate \$D\$-optimal design with any system of linear constraints on the vector of weights. More importantly, the proposed characterization allows us to compute an \emph{exact} \$D\$-optimal design, which is possible thanks to high-quality branch-and-cut solvers specialized to solve mixed integer second order cone problems.
  We prove that some other widely used criteria are also second order cone representable, for instance the criteria of \$A\$- and \$G\$-optimality, as well as the criteria of \$D_K\$- and \$A_K\$-optimality, which are extensions of \$D\$- and \$A\$-optimality used in the case when only a specific system of linear combinations of parameters is of interest. We present several numerical examples demonstrating the efficiency and universality of the proposed method. We show that in many cases the mixed integer second order cone programming approach allows us to find a provably optimal exact design, while the standard heuristics systematically miss the optimum.},
  language = {en}
}

@misc{Sullivan2016,
  author = {Sullivan, T. J.},
  title = {Well-posed Bayesian inverse problems and heavy-tailed stable Banach space priors},
  issn = {1438-0064},
  doi = {10.3934/ipi.2017040},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59422},
  year = {2016},
  abstract = {This article extends the framework of Bayesian inverse problems in infinite-dimensional parameter spaces, as advocated by Stuart (Acta Numer. 19:451-559, 2010) and others, to the case of a heavy-tailed prior measure in the family of stable distributions, such as an infinite-dimensional Cauchy distribution, for which polynomial moments are infinite or undefined. It is shown that analogues of the Karhunen-Lo{\`e}ve expansion for square-integrable random variables can be used to sample such measures. Furthermore, under weaker regularity assumptions than those used to date, the Bayesian posterior measure is shown to depend Lipschitz continuously in the Hellinger metric upon perturbations of the misfit function and observed data.},
  language = {en}
}

@misc{HarmanSagnol,
  author = {Harman, Radoslav and Sagnol, Guillaume},
  title = {Computing D-optimal experimental designs for estimating treatment contrasts under the presence of a nuisance time trend},
  issn = {1438-0064},
  doi = {10.1007/978-3-319-13881-7_10},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-53640},
  abstract = {We prove a mathematical programming characterisation of approximate partial D-optimality under general linear constraints. We use this characterisation with a branch-and-bound method to compute a list of all exact D-optimal designs for estimating a pair of treatment contrasts in the presence of a nuisance time trend up to the size of 24 consecutive trials.},
  language = {en}
}

@misc{NavaYazdaniHegevonTycowiczetal.,
  author = {Nava-Yazdani, Esfandiar and Hege, Hans-Christian and von Tycowicz, Christoph and Sullivan, T. J.},
  title = {A Shape Trajectories Approach to Longitudinal Statistical Analysis},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-69759},
  abstract = {For Kendall's shape space, we determine Jacobi fields and parallel transport analytically, and compute geodesic regression. Using the derived expressions, we can fully leverage the geometry via Riemannian optimization and reduce the computational expense by several orders of magnitude. The methodology is demonstrated by performing a longitudinal statistical analysis of epidemiological shape data. As an application example, we have chosen 3D shapes of knee bones, reconstructed from image data of the Osteoarthritis Initiative. Comparing subject groups with incident and developing osteoarthritis versus normal controls, we find clear differences in the temporal development of femur shapes.
  This paves the way for early prediction of incident knee osteoarthritis, using geometry data only.},
  language = {en}
}

@misc{Sagnol,
  author = {Sagnol, Guillaume},
  title = {A Class of Semidefinite Programs with rank-one solutions},
  issn = {1438-0064},
  doi = {10.1016/j.laa.2011.03.027},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-14933},
  abstract = {We show that a class of semidefinite programs (SDP) admits a solution that is a positive semidefinite matrix of rank at most \$r\$, where \$r\$ is the rank of the matrix involved in the objective function of the SDP. The optimization problems of this class are semidefinite packing problems, which are the SDP analogs to vector packing problems. Of particular interest is the case in which our result guarantees the existence of a solution of rank one: we show that the computation of this solution actually reduces to a Second Order Cone Program (SOCP). We point out an application in statistics, in the optimal design of experiments.},
  language = {en}
}

@misc{KlebanovSikorskiSchuetteetal.,
  author = {Klebanov, Ilja and Sikorski, Alexander and Sch{\"u}tte, Christof and R{\"o}blitz, Susanna},
  title = {Empirical Bayes Methods, Reference Priors, Cross Entropy and the EM Algorithm},
  issn = {1438-0064},
  url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61230},
  abstract = {When estimating a probability density within the empirical Bayes framework, the non-parametric maximum likelihood estimate (NPMLE) usually tends to overfit the data. This issue is usually taken care of by regularization - a penalization term is subtracted from the marginal log-likelihood before the maximization step, so that the estimate favors smooth solutions, resulting in the so-called maximum penalized likelihood estimation (MPLE). The majority of penalizations currently in use are rather arbitrary brute-force solutions, which lack invariance under transformation of the parameters (reparametrization) and measurements. This contradicts the principle that, if the underlying model has several equivalent formulations, the methods of inductive inference should lead to consistent results. Motivated by this principle and using an information-theoretic point of view, we suggest an entropy-based penalization term that guarantees this kind of invariance. The resulting density estimate can be seen as a generalization of reference priors. Using the reference prior as a hyperprior, on the other hand, is argued to be a poor choice for regularization. We also present an insightful connection between the NPMLE, the cross entropy and the principle of minimum discrimination information, suggesting another method of inference that contains the doubly-smoothed maximum likelihood estimation as a special case.},
  language = {en}
}