@article{BennHiepenOsterlandetal., author = {Benn, Andreas and Hiepen, Christian and Osterland, Marc and Sch{\"u}tte, Christof and Zwijsen, An and Knaus, Petra}, title = {Role of bone morphogenetic proteins in sprouting angiogenesis: differential BMP receptor-dependent signaling pathways balance stalk vs. tip cell competence}, series = {FASEB Journal}, volume = {31}, journal = {FASEB Journal}, number = {11}, doi = {10.1096/fj.201700193RR}, pages = {4720 -- 4733}, abstract = {Before the onset of sprouting angiogenesis, the endothelium is prepatterned for the positioning of tip and stalk cells. Both cell identities are not static, as endothelial cells (ECs) constantly compete for the tip cell position in a dynamic fashion. Here, we show that both bone morphogenetic protein (BMP) 2 and BMP6 are proangiogenic in vitro and ex vivo and that the BMP type I receptors, activin receptor-like kinase (ALK)3 and ALK2, play crucial and distinct roles in this process. BMP2 activates the expression of tip cell-associated genes, such as DLL4 (delta-like ligand 4) and KDR (kinase insert domain receptor), and p38-heat shock protein 27 (HSP27)-dependent cell migration, thereby generating tip cell competence. Whereas BMP6 also triggers collective cell migration via the p38-HSP27 signaling axis, BMP6 induces in addition SMAD1/5 signaling, thereby promoting the expression of stalk cell-associated genes, such as HES1 (hairy and enhancer of split 1) and FLT1 (fms-like tyrosine kinase 1). Specifically, ALK3 is required for sprouting from HUVEC spheroids, whereas ALK2 represses sprout formation. We demonstrate that expression levels and respective complex formation of BMP type I receptors in ECs determine stalk vs. tip cell identity, thus contributing to endothelial plasticity during sprouting angiogenesis. As antiangiogenic monotherapies that target the VEGF or ALK1 pathways have not fulfilled efficacy objectives in clinical trials, the selective targeting of the ALK2/3 pathways may be an attractive new approach.}, language = {en} } @article{HuisingaBestCordesetal.1999, author = {Huisinga, Wilhelm and Best, Christoph and Cordes, Frank and Roitzsch, Rainer and Sch{\"u}tte, Christof}, title = {Identification of Molecular Conformations via Statistical Analysis of Simulation Data}, series = {Comp. Chem.}, volume = {20}, journal = {Comp. Chem.}, pages = {1760 -- 1774}, year = {1999}, language = {en} } @article{vonKleistSchuetteZhang, author = {von Kleist, Max and Sch{\"u}tte, Christof and Zhang, Wei}, title = {Statistical analysis of the first passage path ensemble of jump processes}, series = {Journal of Statistical Physics}, volume = {170}, journal = {Journal of Statistical Physics}, doi = {10.1007/s10955-017-1949-x}, pages = {809 -- 843}, abstract = {The transition mechanism of jump processes between two different subsets in state space reveals important dynamical information of the processes and therefore has attracted considerable attention in the past years. In this paper, we study the first passage path ensemble of both discrete-time and continuous-time jump processes on a finite state space. The main approach is to divide each first passage path into nonreactive and reactive segments and to study them separately. The analysis can be applied to jump processes which are non-ergodic, as well as continuous-time jump processes where the waiting time distributions are non-exponential. 
In the particular case that the jump processes are both Markovian and ergodic, our analysis elucidates the relations between the study of the first passage paths and the study of the transition paths in transition path theory. We provide algorithms to numerically compute statistics of the first passage path ensemble. The computational complexity of these algorithms scales with the complexity of solving a linear system, for which efficient methods are available. Several examples demonstrate the wide applicability of the derived results across research areas.}, language = {en} } @misc{OsterlandBennProhaskaetal., author = {Osterland, Marc and Benn, Andreas and Prohaska, Steffen and Sch{\"u}tte, Christof}, title = {Single Cell Tracking in Phase-Contrast Microscopy}, series = {EMBL Symposium 2015 - Seeing is Believing - Imaging the Processes of Life}, journal = {EMBL Symposium 2015 - Seeing is Believing - Imaging the Processes of Life}, abstract = {In this work, we developed an automatic algorithm to analyze cell migration in chemotaxis assays, based on phase-contrast time-lapse microscopy. While manual approaches are still widely used in recent publications, our algorithm is able to track hundreds of single cells per frame. The extracted paths are analyzed with traditional geometrical approaches as well as diffusion-driven Markov state models (MSM). Based on these models, a detailed view of spatial and temporal effects is possible. Using our new approach on experimental data, we are able to distinguish between directed migration (e.g. towards a VEGF gradient) and random migration without a favored direction. A calculation of the committor probabilities reveals that cells across the whole image area are more likely to migrate directly towards the VEGF than away from it during the first four hours. However, in the absence of a chemoattractant, cells are more likely to migrate towards their nearest image border. These conclusions are supported by the spatial mean directions. As a next step, cell-cell interactions during migration and the migration of cell clusters will be analyzed. Furthermore, we want to observe phenotypical changes during migration based on fluorescence microscopy and machine learning. The algorithm is part of a collaborative platform which brings together the experimental expertise of life scientists and the analytical knowledge of computer scientists. This platform is built using web-based technologies with a responsive real-time user interface. All data, including raw data and metadata as well as the accompanying results, will be stored in a secure and scalable compute cluster. The compute cluster provides sufficient space and computational power for modern image-based experiments and their analyses. Specific versions of data and results can be tagged to keep immutable records for archival.}, language = {en} } @misc{HorenkoSchmidtEhrenbergSchuette2006, author = {Horenko, Illia and Schmidt-Ehrenberg, Johannes and Sch{\"u}tte, Christof}, title = {Set-oriented dimension reduction: Localizing principal component analysis via hidden Markov models}, series = {Computational Life Sciences II}, volume = {4216}, journal = {Computational Life Sciences II}, publisher = {Springer}, pages = {98 -- 115}, year = {2006}, language = {en} } @article{WulkowKoltaiSunkaraetal., author = {Wulkow, Niklas and Koltai, P{\'e}ter and Sunkara, Vikram and Sch{\"u}tte, Christof}, title = {Data-driven modelling of nonlinear dynamics by barycentric coordinates and memory}, series = {J. Stat. Phys.}, journal = {J. 
Stat. Phys.}, abstract = {We present a numerical method to model dynamical systems from data. We use the recently introduced method Scalable Probabilistic Approximation (SPA) to project points from a Euclidean space to convex polytopes and represent these projected states of a system in new, lower-dimensional coordinates denoting their position in the polytope. We then introduce a specific nonlinear transformation to construct a model of the dynamics in the polytope and to transform back into the original state space. To overcome the potential loss of information from the projection to a lower-dimensional polytope, we use memory in the sense of the delay-embedding theorem of Takens. By construction, our method produces stable models. We illustrate the capacity of the method to reproduce even chaotic dynamics and attractors with multiple connected components on various examples.}, language = {en} } @article{RaharinirinaPeppertvonKleistetal., author = {Raharinirina, Alexia N. and Peppert, Felix and von Kleist, Max and Sch{\"u}tte, Christof and Sunkara, Vikram}, title = {Inferring gene regulatory networks from single-cell RNA-seq temporal snapshot data requires higher-order moments}, series = {Patterns}, volume = {2}, journal = {Patterns}, number = {9}, doi = {10.1016/j.patter.2021.100332}, abstract = {Single-cell RNA sequencing (scRNA-seq) has become ubiquitous in biology. Recently, there has been a push for using scRNA-seq snapshot data to infer the underlying gene regulatory networks (GRNs) steering cellular function. To date, this aspiration remains unrealized due to technical and computational challenges. In this work, we focus on the latter, which is under-represented in the literature. We took a systemic approach by subdividing the GRN inference into three fundamental components: data pre-processing, feature extraction, and inference. We observed that the regulatory signature is captured in the statistical moments of scRNA-seq data and requires computationally intensive minimization solvers to extract it. Furthermore, current data pre-processing might not conserve these statistical moments. Although our moment-based approach is a didactic tool for understanding the different compartments of GRN inference, this line of thinking—finding computationally feasible multi-dimensional statistics of data—is imperative for designing GRN inference methods.}, language = {en} } @article{PeppertvonKleistSchuetteetal., author = {Peppert, Felix and von Kleist, Max and Sch{\"u}tte, Christof and Sunkara, Vikram}, title = {On the Sufficient Condition for Solving the Gap-Filling Problem Using Deep Convolutional Neural Networks}, series = {IEEE Transactions on Neural Networks and Learning Systems}, volume = {33}, journal = {IEEE Transactions on Neural Networks and Learning Systems}, number = {11}, doi = {10.1109/TNNLS.2021.3072746}, pages = {6194 -- 6205}, abstract = {Deep convolutional neural networks (DCNNs) are routinely used for image segmentation of biomedical data sets to obtain quantitative measurements of cellular structures like tissues. These cellular structures often contain gaps in their boundaries, leading to poor segmentation performance when using DCNNs like the U-Net. The gaps can usually be corrected by post-hoc computer vision (CV) steps, which are specific to the data set and require a disproportionate amount of work. As DCNNs are Universal Function Approximators, it is conceivable that such corrections could be rendered obsolete by selecting an appropriate architecture for the DCNN. 
In this article, we present a novel theoretical framework for the gap-filling problem in DCNNs that allows the selection of architecture to circumvent the CV steps. Combining information-theoretic measures of the data set with a fundamental property of DCNNs, the size of their receptive field, allows us to formulate statements about the solvability of the gap-filling problem independent of the specifics of model training. In particular, we obtain a mathematical proof showing that the maximum proficiency of filling a gap by a DCNN is achieved if its receptive field is larger than the gap length. We then demonstrate the consequence of this result using numerical experiments on a synthetic and a real data set and compare the gap-filling ability of the ubiquitous U-Net architecture with variable depths. Our code is available at https://github.com/ai-biology/dcnn-gap-filling.}, language = {en} } @article{SchulzePeppertSchuetteetal., author = {Schulze, Kenrick and Peppert, Felix and Sch{\"u}tte, Christof and Sunkara, Vikram}, title = {Chimeric U-Net - Modifying the standard U-Net towards Explainability}, series = {bioRxiv}, journal = {bioRxiv}, doi = {10.1101/2022.12.01.518699}, language = {en} } @article{ShaoBjaanaesHellandetal., author = {Shao, Borong and Bjaanaes, Maria and Helland, Aslaug and Sch{\"u}tte, Christof and Conrad, Tim}, title = {EMT network-based feature selection improves prognosis prediction in lung adenocarcinoma}, series = {PLOS ONE}, volume = {14}, journal = {PLOS ONE}, number = {1}, doi = {10.1371/journal.pone.0204186}, abstract = {Various feature selection algorithms have been proposed to identify cancer prognostic biomarkers. In recent years, however, their reproducibility has been criticized. The performance of feature selection algorithms has been shown to be affected by the datasets, underlying networks and evaluation metrics. One of the causes is the curse of dimensionality, which makes it hard to select the features that generalize well on independent data. Even the integration of biological networks does not mitigate this issue because the networks are large and many of their components are not relevant for the phenotype of interest. With the availability of multi-omics data, integrative approaches are being developed to build more robust predictive models. In this scenario, the higher data dimensions create greater challenges. We proposed a phenotype relevant network-based feature selection (PRNFS) framework and demonstrated its advantages in lung cancer prognosis prediction. We constructed cancer prognosis relevant networks based on epithelial mesenchymal transition (EMT) and integrated them with different types of omics data for feature selection. With less than 2.5\% of the total dimensionality, we obtained EMT prognostic signatures that achieved remarkable prediction performance (average AUC values above 0.8), very significant sample stratifications, and meaningful biological interpretations. In addition to finding EMT signatures from different omics data levels, we combined these single-omics signatures into multi-omics signatures, which improved sample stratifications significantly. 
Both single- and multi-omics EMT signatures were tested on independent multi-omics lung cancer datasets and significant sample stratifications were obtained.}, language = {en} } @article{ConradGenzelCvetkovicetal., author = {Conrad, Tim and Genzel, Martin and Cvetkovic, Nada and Wulkow, Niklas and Leichtle, Alexander Benedikt and Vybiral, Jan and Kutyniok, Gitta and Sch{\"u}tte, Christof}, title = {Sparse Proteomics Analysis - a compressed sensing-based approach for feature selection and classification of high-dimensional proteomics mass spectrometry data}, series = {BMC Bioinformatics}, volume = {18}, journal = {BMC Bioinformatics}, number = {160}, doi = {10.1186/s12859-017-1565-4}, pages = {1 -- 20}, abstract = {Background: High-throughput proteomics techniques, such as mass spectrometry (MS)-based approaches, produce very high-dimensional data-sets. In a clinical setting one is often interested in how mass spectra differ between patients of different classes, for example spectra from healthy patients vs. spectra from patients having a particular disease. Machine learning algorithms are needed to (a) identify these discriminating features and (b) classify unknown spectra based on this feature set. Since the acquired data is usually noisy, the algorithms should be robust against noise and outliers, while the identified feature set should be as small as possible. Results: We present a new algorithm, Sparse Proteomics Analysis (SPA), based on the theory of compressed sensing that allows us to identify a minimal discriminating set of features from mass spectrometry data-sets. We show (1) how our method performs on artificial and real-world data-sets, (2) that its performance is competitive with standard (and widely used) algorithms for analyzing proteomics data, and (3) that it is robust against random and systematic noise. 
We further demonstrate the applicability of our algorithm to two previously published clinical data-sets.}, language = {en} } @article{VegaSchuetteConrad, author = {Vega, Iliusi and Sch{\"u}tte, Christof and Conrad, Tim}, title = {Finding metastable states in real-world time series with recurrence networks}, series = {Physica A: Statistical Mechanics and its Applications}, volume = {445}, journal = {Physica A: Statistical Mechanics and its Applications}, doi = {10.1016/j.physa.2015.10.041}, pages = {1 -- 17}, abstract = {In the framework of time series analysis with recurrence networks, we introduce a self-adaptive method that determines the elusive recurrence threshold and identifies metastable states in complex real-world time series. As an initial step, we introduce a way to set the embedding parameters used to reconstruct the state space from the time series. We set them as the ones giving the maximum Shannon entropy of the diagonal line length distribution for the first simultaneous minima of recurrence rate and Shannon entropy. To identify metastable states, as well as the transitions between them, we use a soft partitioning algorithm for module finding which is specifically developed for the case in which a system shows metastability. We illustrate our method with a complex time series example. Finally, we show the robustness of our method for identifying metastable states. Our results suggest that our method is robust for identifying metastable states in complex time series, even when introducing considerable levels of noise and missing data points.}, language = {en} } @misc{SchuetteConrad, author = {Sch{\"u}tte, Christof and Conrad, Tim}, title = {Showcase 3: Information-based medicine}, series = {MATHEON - Mathematics for Key Technologies}, volume = {1}, journal = {MATHEON - Mathematics for Key Technologies}, editor = {Deuflhard, Peter and Gr{\"o}tschel, Martin and H{\"o}mberg, Dietmar and Horst, Ulrich and Kramer, J{\"u}rg and Mehrmann, Volker and Polthier, Konrad and Schmidt, Frank and Skutella, Martin and Sprekels, J{\"u}rgen}, publisher = {European Mathematical Society}, pages = {66 -- 67}, language = {en} } @misc{VegaSchuetteConrad, author = {Vega, Iliusi and Sch{\"u}tte, Christof and Conrad, Tim}, title = {SAIMeR: Self-adapted method for the identification of metastable states in real-world time series}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-50130}, abstract = {In the framework of time series analysis with recurrence networks, we introduce SAIMeR, a heuristic self-adapted method that determines the elusive recurrence threshold and identifies metastable states in complex time series. To identify metastable states as well as the transitions between them, we use graph theory concepts and a fuzzy partitioning clustering algorithm. We illustrate SAIMeR by applying it to three real-world time series and show that it is able to identify metastable states in real-world data with noise and missing data points. 
Finally, we suggest a way to choose the embedding parameters used to construct the state space in which this method is performed, based on the analysis of how the values of these parameters affect two recurrence quantification measurements: recurrence rate and entropy.}, language = {en} } @article{ZhangKlusConradetal., author = {Zhang, Wei and Klus, Stefan and Conrad, Tim and Sch{\"u}tte, Christof}, title = {Learning chemical reaction networks from trajectory data}, series = {SIAM Journal on Applied Dynamical Systems (SIADS)}, volume = {18}, journal = {SIAM Journal on Applied Dynamical Systems (SIADS)}, number = {4}, doi = {10.1137/19M1265880}, pages = {2000 -- 2046}, abstract = {We develop a data-driven method to learn chemical reaction networks from trajectory data. Modeling the reaction system as a continuous-time Markov chain and assuming the system is fully observed, our method learns the propensity functions of the system with predetermined basis functions by maximizing the likelihood function of the trajectory data under l^1 sparse regularization. We demonstrate our method with numerical examples using synthetic data and carry out an asymptotic analysis of the proposed learning procedure in the infinite-data limit.}, language = {en} } @inproceedings{ChaukairSchuetteSunkara, author = {Chaukair, Mustafa and Sch{\"u}tte, Christof and Sunkara, Vikram}, title = {On the Activation Space of ReLU Equipped Deep Neural Networks}, series = {Procedia Computer Science}, volume = {222}, booktitle = {Procedia Computer Science}, doi = {10.1016/j.procs.2023.08.200}, pages = {624 -- 635}, abstract = {Modern Deep Neural Networks are getting wider and deeper in their architecture design. However, with an increasing number of parameters, the decision mechanism becomes more opaque. Therefore, there is a need for understanding the structures arising in the hidden layers of deep neural networks. In this work, we present a new mathematical framework for describing the canonical polyhedral decomposition in the input space, and in addition, we introduce the notions of collapsing and preserving patches, pertinent to understanding the forward map and the activation space they induce. The activation space can be seen as the output of a layer and, in the particular case of ReLU activations, we prove that this output has the structure of a polyhedral complex.}, language = {en} } @misc{KostreSunkaraSchuetteetal., author = {Kostr{\'e}, Margarita and Sunkara, Vikram and Sch{\"u}tte, Christof and Djurdjevac Conrad, Nataša}, title = {Understanding the Romanization Spreading on Historical Interregional Networks in Northern Tunisia}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-86764}, abstract = {Spreading processes are important drivers of change in social systems. To understand the mechanisms of spreading, it is fundamental to have information about the underlying contact network and the dynamical parameters of the process. However, in many real-world examples, this information is not known and needs to be inferred from data. State-of-the-art spreading inference methods have mostly been applied to modern social systems, as they rely on the availability of very detailed data. In this paper we study the inference challenges for historical spreading processes, for which only very fragmented information is available. To cope with this problem, we extend existing network models by formulating a model on a mesoscale with a temporal spreading rate. 
Furthermore, we formulate the respective parameter inference problem for the extended model. We apply our approach to the romanization process of Northern Tunisia, for which only a scarce dataset is available, and study properties of the inferred time-evolving interregional networks. As a result, we show that (1) optimal solutions consist of very different network structures and spreading rate functions; and that (2) these diverse solutions produce very similar spreading patterns. Finally, we discuss how inferred dominant interregional connections are related to available archaeological traces. Historical networks resulting from our approach can help in understanding complex processes of cultural change in ancient times.}, language = {en} } @article{KostreSunkaraSchuetteetal.2022, author = {Kostr{\'e}, Margarita and Sunkara, Vikram and Sch{\"u}tte, Christof and Djurdjevac Conrad, Natasa}, title = {Understanding the Romanization Spreading on Historical Interregional Networks in Northern Tunisia}, series = {Applied Network Science}, volume = {7}, journal = {Applied Network Science}, publisher = {Springer Nature}, doi = {10.1007/s41109-022-00492-w}, pages = {18}, year = {2022}, abstract = {Spreading processes are important drivers of change in social systems. To understand the mechanisms of spreading, it is fundamental to have information about the underlying contact network and the dynamical parameters of the process. However, in many real-world examples, this information is not known and needs to be inferred from data. State-of-the-art spreading inference methods have mostly been applied to modern social systems, as they rely on the availability of very detailed data. In this paper we study the inference challenges for historical spreading processes, for which only very fragmented information is available. To cope with this problem, we extend existing network models by formulating a model on a mesoscale with a temporal spreading rate. Furthermore, we formulate the respective parameter inference problem for the extended model. We apply our approach to the romanization process of Northern Tunisia, for which only a scarce dataset is available, and study properties of the inferred time-evolving interregional networks. As a result, we show that (1) optimal solutions consist of very different network structures and spreading rate functions; and that (2) these diverse solutions produce very similar spreading patterns. Finally, we discuss how inferred dominant interregional connections are related to available archaeological traces. Historical networks resulting from our approach can help in understanding complex processes of cultural change in ancient times.}, language = {en} } @article{SarichDjurdjevacConradBruckneretal.2014, author = {Sarich, Marco and Djurdjevac Conrad, Natasa and Bruckner, Sharon and Conrad, Tim and Sch{\"u}tte, Christof}, title = {Modularity revisited: A novel dynamics-based concept for decomposing complex networks}, series = {Journal of Computational Dynamics}, volume = {1}, journal = {Journal of Computational Dynamics}, number = {1}, doi = {10.3934/jcd.2014.1.191}, pages = {191 -- 212}, year = {2014}, language = {en} } @article{WulkowConradDjurdjevacConradetal., author = {Wulkow, Hanna and Conrad, Tim and Djurdjevac Conrad, Natasa and M{\"u}ller, Sebastian A. 
and Nagel, Kai and Sch{\"u}tte, Christof}, title = {Prediction of Covid-19 spreading and optimal coordination of counter-measures: From microscopic to macroscopic models to Pareto fronts}, series = {PLOS ONE}, volume = {16}, journal = {PLOS ONE}, number = {4}, publisher = {Public Library of Science}, doi = {10.1371/journal.pone.0249676}, language = {en} } @article{SherrattSrivastavaAinslieetal., author = {Sherratt, Katharine and Srivastava, Ajitesh and Ainslie, Kylie and Singh, David E. and Cublier, Aymar and Marinescu, Maria Cristina and Carretero, Jesus and Garcia, Alberto Cascajo and Franco, Nicolas and Willem, Lander and Abrams, Steven and Faes, Christel and Beutels, Philippe and Hens, Niel and M{\"u}ller, Sebastian and Charlton, Billy and Ewert, Ricardo and Paltra, Sydney and Rakow, Christian and Rehmann, Jakob and Conrad, Tim and Sch{\"u}tte, Christof and Nagel, Kai and Abbott, Sam and Grah, Rok and Niehus, Rene and Prasse, Bastian and Sandmann, Frank and Funk, Sebastian}, title = {Characterising information gains and losses when collecting multiple epidemic model outputs}, series = {Epidemics}, volume = {47}, journal = {Epidemics}, publisher = {Elsevier BV}, issn = {1755-4365}, doi = {10.1016/j.epidem.2024.100765}, abstract = {Collaborative comparisons and combinations of epidemic models are used as policy-relevant evidence during epidemic outbreaks. In the process of collecting multiple model projections, such collaborations may gain or lose relevant information. Typically, modellers contribute a probabilistic summary at each time-step. We compared this to directly collecting simulated trajectories. We aimed to explore information on key epidemic quantities; ensemble uncertainty; and performance against data, investigating the potential to continuously gain information from a single cross-sectional collection of model results. Methods: We compared July 2022 projections from the European COVID-19 Scenario Modelling Hub. Five modelling teams projected incidence in Belgium, the Netherlands, and Spain. We compared projections by incidence, peaks, and cumulative totals. We created a probabilistic ensemble drawn from all trajectories, and compared it to ensembles from a median across each model's quantiles, or a linear opinion pool. We measured the predictive accuracy of individual trajectories against observations, using this in a weighted ensemble. We repeated this sequentially against increasing weeks of observed data. We evaluated these ensembles to reflect performance with varying observed data. Results: By collecting modelled trajectories, we showed policy-relevant epidemic characteristics. Trajectories contained a right-skewed distribution well represented by an ensemble of trajectories or a linear opinion pool, but not by models' quantile intervals. Ensembles weighted by performance typically retained the range of plausible incidence over time, and in some cases narrowed this by excluding some epidemic shapes. Conclusions: We observed several information gains from collecting modelled trajectories rather than quantile distributions, including the potential for continuously updated information from a single model collection. The value of information gains and losses may vary with each collaborative effort's aims, depending on the needs of projection users. Understanding the differing information potential of methods to collect model projections can support the accuracy, sustainability, and communication of collaborative infectious disease modelling efforts. 
Data availability: All code and data are available on GitHub: https://github.com/covid19-forecast-hub-europe/aggregation-info-loss}, language = {en} } @article{GaskinConradPavliotisetal., author = {Gaskin, Thomas and Conrad, Tim and Pavliotis, Grigorios A. and Sch{\"u}tte, Christof}, title = {Neural parameter calibration and uncertainty quantification for epidemic forecasting}, series = {PLOS ONE}, journal = {PLOS ONE}, abstract = {The recent COVID-19 pandemic has thrown the importance of accurately forecasting contagion dynamics and learning infection parameters into sharp focus. At the same time, effective policy-making requires knowledge of the uncertainty on such predictions, in order, for instance, to be able to ready hospitals and intensive care units for a worst-case scenario without needlessly wasting resources. In this work, we apply a novel and powerful computational method to the problem of learning probability densities on contagion parameters and providing uncertainty quantification for pandemic projections. Using a neural network, we calibrate an ODE model to data on the spread of COVID-19 in Berlin in 2020, achieving both a significantly more accurate calibration and prediction than Markov-Chain Monte Carlo (MCMC)-based sampling schemes. The uncertainties on our predictions provide meaningful confidence intervals, e.g. on infection figures and hospitalisation rates, while training and running the neural scheme takes minutes where MCMC takes hours. We show convergence of our method to the true posterior on a simplified SIR model of epidemics, and also demonstrate our method's learning capabilities on a reduced dataset, where a complex model is learned from a small number of compartments for which data is available.}, language = {en} }