@book{Sullivan,
  author    = {Sullivan, T. J.},
  title     = {Introduction to Uncertainty Quantification},
  series    = {Texts in Applied Mathematics},
  volume    = {63},
  publisher = {Springer},
  year      = {2015},
  isbn      = {978-3-319-23394-9},
  doi       = {10.1007/978-3-319-23395-6},
  language  = {en}
}

@article{OwhadiScovelSullivan,
  author   = {Owhadi, Houman and Scovel, Clint and Sullivan, T. J.},
  title    = {On the Brittleness of Bayesian Inference},
  journal  = {SIAM Review},
  volume   = {57},
  number   = {4},
  year     = {2015},
  pages    = {566--582},
  doi      = {10.1137/130938633},
  abstract = {With the advent of high-performance computing, Bayesian methods are becoming increasingly popular tools for the quantification of uncertainty throughout science and industry. Since these methods can impact the making of sometimes critical decisions in increasingly complicated contexts, the sensitivity of their posterior conclusions with respect to the underlying models and prior beliefs is a pressing question to which there currently exist positive and negative answers. We report new results suggesting that, although Bayesian methods are robust when the number of possible outcomes is finite or when only a finite number of marginals of the data-generating distribution are unknown, they could be generically brittle when applied to continuous systems (and their discretizations) with finite information on the data-generating distribution. If closeness is defined in terms of the total variation (TV) metric or the matching of a finite system of generalized moments, then (1) two practitioners who use arbitrarily close models and observe the same (possibly arbitrarily large amount of) data may reach opposite conclusions; and (2) any given prior and model can be slightly perturbed to achieve any desired posterior conclusion. The mechanism causing brittleness/robustness suggests that learning and robustness are antagonistic requirements, which raises the possibility of a missing stability condition when using Bayesian inference in a continuous world under finite information.},
  language = {en}
}

@misc{Sullivan2016,
  author   = {Sullivan, T. J.},
  title    = {Well-posed Bayesian inverse problems and heavy-tailed stable Banach space priors},
  year     = {2016},
  issn     = {1438-0064},
  doi      = {10.3934/ipi.2017040},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59422},
  abstract = {This article extends the framework of Bayesian inverse problems in infinite-dimensional parameter spaces, as advocated by Stuart (Acta Numer. 19:451--559, 2010) and others, to the case of a heavy-tailed prior measure in the family of stable distributions, such as an infinite-dimensional Cauchy distribution, for which polynomial moments are infinite or undefined. It is shown that analogues of the Karhunen-Lo{\`e}ve expansion for square-integrable random variables can be used to sample such measures. Furthermore, under weaker regularity assumptions than those used to date, the Bayesian posterior measure is shown to depend Lipschitz continuously in the Hellinger metric upon perturbations of the misfit function and observed data.},
  language = {en}
}

@misc{CockayneOatesSullivanetal.,
  author   = {Cockayne, Jon and Oates, Chris and Sullivan, T. J. and Girolami, Mark},
  title    = {Probabilistic Meshless Methods for Partial Differential Equations and Bayesian Inverse Problems},
  issn     = {1438-0064},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59513},
  abstract = {This paper develops a class of meshless methods that are well-suited to statistical inverse problems involving partial differential equations (PDEs).
              The methods discussed in this paper view the forcing term in the PDE as a random field that induces a probability distribution over the residual error of a symmetric collocation method. This construction enables the solution of challenging inverse problems while accounting, in a rigorous way, for the impact of the discretisation of the forward problem. In particular, this confers robustness to failure of meshless methods, with statistical inferences driven to be more conservative in the presence of significant solver error. In addition, (i) a principled learning-theoretic approach to minimise the impact of solver error is developed, and (ii) the challenging setting of inverse problems with a non-linear forward model is considered. The method is applied to parameter inference problems in which non-negligible solver error must be accounted for in order to draw valid statistical conclusions.},
  language = {en}
}