@misc{Sullivan2016,
  author   = {Sullivan, T. J.},
  title    = {Well-posed Bayesian inverse problems and heavy-tailed stable Banach space priors},
  issn     = {1438-0064},
  doi      = {10.3934/ipi.2017040},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59422},
  year     = {2016},
  abstract = {This article extends the framework of Bayesian inverse problems in infinite-dimensional parameter spaces, as advocated by Stuart (Acta Numer. 19:451-559, 2010) and others, to the case of a heavy-tailed prior measure in the family of stable distributions, such as an infinite-dimensional Cauchy distribution, for which polynomial moments are infinite or undefined. It is shown that analogues of the Karhunen-Lo{\`e}ve expansion for square-integrable random variables can be used to sample such measures. Furthermore, under weaker regularity assumptions than those used to date, the Bayesian posterior measure is shown to depend Lipschitz continuously in the Hellinger metric upon perturbations of the misfit function and observed data.},
  language = {en}
}

@misc{CockayneOatesSullivanetal.,
  author   = {Cockayne, Jon and Oates, Chris and Sullivan, T. J. and Girolami, Mark},
  title    = {Probabilistic Meshless Methods for Partial Differential Equations and Bayesian Inverse Problems},
  issn     = {1438-0064},
  url      = {http://nbn-resolving.de/urn:nbn:de:0297-zib-59513},
  abstract = {This paper develops a class of meshless methods that are well-suited to statistical inverse problems involving partial differential equations (PDEs). The methods discussed in this paper view the forcing term in the PDE as a random field that induces a probability distribution over the residual error of a symmetric collocation method. This construction enables the solution of challenging inverse problems while accounting, in a rigorous way, for the impact of the discretisation of the forward problem. In particular, this confers robustness to failure of meshless methods, with statistical inferences driven to be more conservative in the presence of significant solver error. In addition, (i) a principled learning-theoretic approach to minimise the impact of solver error is developed, and (ii) the challenging setting of inverse problems with a non-linear forward model is considered. The method is applied to parameter inference problems in which non-negligible solver error must be accounted for in order to draw valid statistical conclusions.},
  language = {en}
}