@article{ConradGenzelCvetkovicetal.2017, author = {Conrad, Tim and Genzel, Martin and Cvetkovic, Nada and Wulkow, Niklas and Vybiral, Jan and Kutyniok, Gitta and Sch{\"u}tte, Christof}, title = {Sparse Proteomics Analysis - a compressed sensing-based approach for feature selection and classification of high-dimensional proteomics mass spectrometry data}, volume = {18}, journal = {BMC Bioinformatics}, number = {160}, doi = {10.1186/s12859-017-1565-4}, pages = {1 -- 20}, year = {2017}, abstract = {Motivation: High-throughput proteomics techniques, such as mass spectrometry (MS)-based approaches, produce very high-dimensional data-sets. In a clinical setting, one is often interested in how MS spectra differ between patients of different classes, for example spectra from healthy patients vs. spectra from patients having a particular disease. Machine learning algorithms are needed to (a) identify these discriminating features and (b) classify unknown spectra based on this feature set. Since the acquired data is usually noisy, the algorithms should be robust to noise and outliers, and the identified feature set should be as small as possible. Results: We present a new algorithm, Sparse Proteomics Analysis (SPA), based on the theory of Compressed Sensing that allows us to identify a minimal discriminating set of features from mass spectrometry data-sets. We show how our method performs on artificial and real-world data-sets.}, language = {en} } @article{SeeberConradHoppeetal.2017, author = {Seeber, L. and Conrad, Tim and Hoppe, Christian and Obermeier, Patrick and Chen, X. and Karsch, K. and Muehlhans, S. and Tief, Franziska and Boettcher, Sindy and Diedrich, S. and Schweiger, Brunhilde and Rath, Barbara}, title = {Educating parents about the vaccination status of their children: A user-centered mobile application}, volume = {5}, journal = {Preventive Medicine Reports}, doi = {10.1016/j.pmedr.2017.01.002}, pages = {241 -- 250}, year = {2017}, abstract = {Parents are often uncertain about the vaccination status of their children. In times of vaccine hesitancy, vaccination programs could benefit from active patient participation. The Vaccination App (VAccApp) was developed by the Vienna Vaccine Safety Initiative, enabling parents to learn about the vaccination status of their children, including 25 different routine, special indication and travel vaccines listed in the WHO Immunization Certificate of Vaccination (WHO-ICV). Between 2012 and 2014, the VAccApp was validated in a hospital-based quality management program in Berlin, Germany, in collaboration with the Robert Koch Institute. Parents of 178 children were asked to transfer the immunization data of their children from the WHO-ICV into the VAccApp. The respective WHO-ICV was photocopied for independent, professional data entry (gold standard). Demonstrating the status quo in vaccine information reporting, a Recall Group of 278 parents underwent structured interviews for verbal immunization histories, without the respective WHO-ICV. Only 9\% of the Recall Group were able to provide a complete vaccination status; on average 39\% of the questions were answered correctly. Using the WHO-ICV with the help of the VAccApp resulted in 62\% of parents providing a complete vaccination status; on average 95\% of the questions were answered correctly. After using the VAccApp, parents were more likely to remember key aspects of the vaccination history.
User-friendly mobile applications empower parents to take a closer look at the vaccination record, thereby taking an active role in providing accurate vaccination histories. Parents may become motivated to ask informed questions and to keep vaccinations up-to-date.}, language = {en} } @misc{SakuraiHegeKuhnetal.2017, author = {Sakurai, Daisuke and Hege, Hans-Christian and Kuhn, Alexander and Rust, Henning and Kern, Bastian and Breitkopf, Tom-Lukas}, title = {An Application-Oriented Framework for Feature Tracking in Atmospheric Sciences}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-72617}, year = {2017}, abstract = {In atmospheric sciences, sizes of data sets grow continuously due to increasing resolutions. A central task is the comparison of spatiotemporal fields, to assess different simulations and to compare simulations with observations. A significant information reduction is possible by focusing on geometric-topological features of the fields or on derived meteorological objects. Due to the huge size of the data sets, spatial features have to be extracted in time slices and traced over time. Fields with a chaotic component, i.e. without 1:1 spatiotemporal correspondences, can be compared by looking at statistics of feature properties. Feature extraction, however, requires a clear mathematical definition of the features - which many meteorological objects still lack. Traditionally, object extractions are often heuristic, defined only by implemented algorithms, and thus are not comparable. This work surveys our framework designed for efficient development of feature tracking methods and for testing new feature definitions. The framework supports well-established visualization practices and is being used by atmospheric researchers to diagnose and compare data.}, language = {en} } @inproceedings{JayrannejadConrad2017, author = {Jayrannejad, Fahrnaz and Conrad, Tim}, title = {Better Interpretable Models for Proteomics Data Analysis Using rule-based Mining}, booktitle = {Springer Lecture Notes in Artificial Intelligence}, year = {2017}, abstract = {Recent advances in -omics technology have yielded large data-sets in many areas of biology, such as mass spectrometry based proteomics. However, analyzing this data is still a challenging task, mainly due to the very high dimensionality and high noise content of the data. One of the main objectives of the analysis is the identification of relevant patterns (or features) which can be used for classification of new samples as healthy or diseased. So, a method is required to find easily interpretable models from this data. To achieve the above-mentioned goal, we have adapted the disjunctive association rule mining algorithm, TitanicOR, to identify emerging patterns from our mass spectrometry proteomics data-sets. Comparison to five state-of-the-art methods shows that our method is advantageous over them in terms of identifying the inter-dependency between the features and the TP-rate and precision of the features selected. We further demonstrate the applicability of our algorithm to one previously published clinical data-set.}, language = {en} } @article{ZhukovaHiepenKnausetal.2017, author = {Zhukova, Yulia and Hiepen, Christian and Knaus, Petra and Osterland, Marc and Prohaska, Steffen and Dunlop, John W. C.
and Fratzl, Peter and Skorb, Ekaterina V.}, title = {The role of titanium surface nanotopography on preosteoblast morphology, adhesion and migration}, journal = {Advanced Healthcare Materials}, doi = {10.1002/adhm.201601244}, year = {2017}, abstract = {Surface structuring of titanium-based implants with appropriate nanotopographies can significantly modulate their impact on the biological behavior of cells populating these implants. Implant assisted bone tissue repair and regeneration require functional adhesion and expansion of bone progenitors. The surface nanotopography of implant materials used to support bone healing and its effect on cell behavior, in particular cell adhesion, spreading, expansion, and motility, is still not clearly understood. The aim of this study is to investigate preosteoblast proliferation, adhesion, morphology, and migration on different titanium materials with similar surface chemistry, but distinct nanotopographical features. Sonochemical treatment and anodic oxidation were employed to fabricate disordered - mesoporous titania (TMS), and ordered - titania nanotubular (TNT) topographies respectively. The morphological evaluation revealed a surface dependent shape, thickness, and spreading of cells owing to different adherence behavior. Cells were polygonal-shaped and well-spread on glass and TMS, but displayed an elongated fibroblast-like morphology on TNT surfaces. The cells on glass however, were much flatter than on nanostructured surfaces. Both nanostructured surfaces impaired cell adhesion, but TMS was more favorable for cell growth due to its support of cell attachment and spreading in contrast to TNT. Quantitative wound healing assay in combination with live-cell imaging revealed that cells seeded on TMS surfaces migrated in close proximity to neighboring cells and less directed when compared to the migratory behavior on other surfaces. The results indicate distinctly different cell adhesion and migration on ordered and disordered titania nanotopographies, providing important information that could be used in optimizing titanium-based scaffold design to foster bone tissue growth and repair.}, language = {en} } @article{BaumLindowHegeetal.2017, author = {Baum, Daniel and Lindow, Norbert and Hege, Hans-Christian and Lepper, Verena and Siopi, Tzulia and Kutz, Frank and Mahlow, Kristin and Mahnke, Heinz-Eberhard}, title = {Revealing hidden text in rolled and folded papyri}, volume = {123}, journal = {Applied Physics A}, number = {3}, doi = {10.1007/s00339-017-0808-6}, pages = {171}, year = {2017}, abstract = {Ancient Egyptian papyri are often folded, rolled up or kept as small packages, sometimes even sealed. Physically unrolling or unfolding these packages might severely damage them. We demonstrate a way to get access to the hidden script without physical unfolding by employing computed tomography and mathematical algorithms for virtual unrolling and unfolding. Our algorithmic approaches are combined with manual interaction. This provides the necessary flexibility to enable the unfolding of even complicated and partly damaged papyrus packages. In addition, it allows us to cope with challenges posed by the structure of ancient papyrus, which is rather irregular, compared to other writing substrates like metallic foils or parchment. Unfolding of packages is done in two stages. 
In the first stage, we virtually invert the physical folding process step by step until the partially unfolded package is topologically equivalent to a scroll or a papyrus sheet folded only along one fold line. To minimize distortions at this stage, we apply the method of moving least squares. In the second stage, the papyrus is simply flattened, which requires the definition of a medial surface. We have applied our software framework to several papyri. In this work, we present the results of applying our approaches to mockup papyri that were either rolled or folded along perpendicular fold lines. In the case of the folded papyrus, our approach represents the first attempt to address the unfolding of such complicated folds.}, language = {en} } @misc{BaumLindowHegeetal.2017, author = {Baum, Daniel and Lindow, Norbert and Hege, Hans-Christian and Lepper, Verena and Siopi, Tzulia and Kutz, Frank and Mahlow, Kristin and Mahnke, Heinz-Eberhard}, title = {Revealing hidden text in rolled and folded papyri}, issn = {1438-0064}, doi = {10.1007/s00339-017-0808-6}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-61826}, year = {2017}, abstract = {Ancient Egyptian papyri are often folded, rolled up or kept as small packages, sometimes even sealed. Physically unrolling or unfolding these packages might severely damage them. We demonstrate a way to get access to the hidden script without physical unfolding by employing computed tomography and mathematical algorithms for virtual unrolling and unfolding. Our algorithmic approaches are combined with manual interaction. This provides the necessary flexibility to enable the unfolding of even complicated and partly damaged papyrus packages. In addition, it allows us to cope with challenges posed by the structure of ancient papyrus, which is rather irregular, compared to other writing substrates like metallic foils or parchment. Unfolding of packages is done in two stages. In the first stage, we virtually invert the physical folding process step by step until the partially unfolded package is topologically equivalent to a scroll or a papyrus sheet folded only along one fold line. To minimize distortions at this stage, we apply the method of moving least squares. In the second stage, the papyrus is simply flattened, which requires the definition of a medial surface. We have applied our software framework to several papyri. In this work, we present the results of applying our approaches to mockup papyri that were either rolled or folded along perpendicular fold lines. In the case of the folded papyrus, our approach represents the first attempt to address the unfolding of such complicated folds.}, language = {en} } @article{BernardSalamancaThunbergetal.2017, author = {Bernard, Florian and Salamanca, Luis and Thunberg, Johan and Tack, Alexander and Jentsch, Dennis and Lamecker, Hans and Zachow, Stefan and Hertel, Frank and Goncalves, Jorge and Gemmar, Peter}, title = {Shape-aware Surface Reconstruction from Sparse 3D Point-Clouds}, volume = {38}, journal = {Medical Image Analysis}, doi = {10.1016/j.media.2017.02.005}, pages = {77 -- 89}, year = {2017}, abstract = {The reconstruction of an object's shape or surface from a set of 3D points plays an important role in medical image analysis, e.g. in anatomy reconstruction from tomographic measurements or in the process of aligning intra-operative navigation and preoperative planning data. In such scenarios, one usually has to deal with sparse data, which significantly aggravates the problem of reconstruction. 
However, medical applications often provide contextual information about the 3D point data that allows us to incorporate prior knowledge about the shape that is to be reconstructed. To this end, we propose the use of a statistical shape model (SSM) as a prior for surface reconstruction. The SSM is represented by a point distribution model (PDM), which is associated with a surface mesh. Using the shape distribution that is modelled by the PDM, we formulate the problem of surface reconstruction from a probabilistic perspective based on a Gaussian Mixture Model (GMM). In order to do so, the given points are interpreted as samples of the GMM. By using mixture components with anisotropic covariances that are "oriented" according to the surface normals at the PDM points, a surface-based fitting is accomplished. Estimating the parameters of the GMM in a maximum a posteriori manner yields the reconstruction of the surface from the given data points. We compare our method to the extensively used Iterative Closest Points method on several different anatomical datasets/SSMs (brain, femur, tibia, hip, liver) and demonstrate superior accuracy and robustness on sparse data.}, language = {en} } @article{SchenklMuggenthalerHubigetal.2017, author = {Schenkl, Sebastian and Muggenthaler, Holger and Hubig, Michael and Erdmann, Bodo and Weiser, Martin and Zachow, Stefan and Heinrich, Andreas and G{\"u}ttler, Felix Victor and Teichgr{\"a}ber, Ulf and Mall, Gita}, title = {Automatic CT-based finite element model generation for temperature-based death time estimation: feasibility study and sensitivity analysis}, volume = {131}, journal = {International Journal of Legal Medicine}, number = {3}, doi = {10.1007/s00414-016-1523-0}, pages = {699 -- 712}, year = {2017}, abstract = {Temperature-based death time estimation is based either on simple phenomenological models of corpse cooling or on detailed physical heat transfer models. The latter are much more complex, but allow a higher accuracy of death time estimation, as in principle all relevant cooling mechanisms can be taken into account. Here, a complete workflow for finite element-based cooling simulation models is presented. The following steps are demonstrated on CT-phantoms: • CT-scan • Segmentation of the CT images for thermodynamically relevant features of individual geometries • Conversion of the segmentation result into a Finite Element (FE) simulation model • Computation of the model cooling curve • Calculation of the cooling time For the first time in FE-based cooling time estimation, the steps from the CT image via segmentation to FE model generation are performed semi-automatically. The cooling time calculation results are compared to cooling measurements performed on the phantoms under controlled conditions. In this context, the method is validated using different CT-phantoms. Some of the CT phantoms' thermodynamic material parameters had to be experimentally determined via independent experiments.
Moreover the impact of geometry and material parameter uncertainties on the estimated cooling time is investigated by a sensitivity analysis.}, language = {en} } @inproceedings{JoachimskyAmbellanZachow2017, author = {Joachimsky, Robert and Ambellan, Felix and Zachow, Stefan}, title = {Computerassistierte Auswahl und Platzierung von interpositionalen Spacern zur Behandlung fr{\"u}her Gonarthrose}, volume = {16}, booktitle = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65321}, pages = {106 -- 111}, year = {2017}, abstract = {Degenerative Gelenkerkrankungen, wie die Osteoarthrose, sind ein h{\"a}ufiges Krankheitsbild unter {\"a}lteren Erwachsenen. Hierbei verringert sich u.a. der Gelenkspalt aufgrund degenerierten Knorpels oder gesch{\"a}digter Menisci. Ein in den Gelenkspalt eingebrachter interpositionaler Spacer soll die mit der Osteoarthrose einhergehende verringerte Gelenkkontaktfl{\"a}che erh{\"o}hen und so der teilweise oder vollst{\"a}ndige Gelenkersatz hinausgez{\"o}gert oder vermieden werden. In dieser Arbeit pr{\"a}sentieren wir eine Planungssoftware f{\"u}r die Auswahl und Positionierung eines interpositionalen Spacers am Patientenmodell. Auf einer MRT-basierten Bildsegmentierung aufbauend erfolgt eine geometrische Rekonstruktion der 3D-Anatomie des Kniegelenks. Anhand dieser wird der Gelenkspalt bestimmt, sowie ein Spacer ausgew{\"a}hlt und algorithmisch vorpositioniert. Die Positionierung des Spacers ist durch den Benutzer jederzeit interaktiv anpassbar. F{\"u}r jede Positionierung eines Spacers wird ein Fitness-Wert zur Knieanatomie des jeweiligen Patienten berechnet und den Nutzern R{\"u}ckmeldung hinsichtlich Passgenauigkeit gegeben. Die Software unterst{\"u}tzt somit als Entscheidungshilfe die behandelnden {\"A}rzte bei der patientenspezifischen Spacerauswahl.}, language = {de} } @inproceedings{AmbellanTackWilsonetal.2017, author = {Ambellan, Felix and Tack, Alexander and Wilson, Dave and Anglin, Carolyn and Lamecker, Hans and Zachow, Stefan}, title = {Evaluating two methods for Geometry Reconstruction from Sparse Surgical Navigation Data}, volume = {16}, booktitle = {Proceedings of the Jahrestagung der Deutschen Gesellschaft f{\"u}r Computer- und Roboterassistierte Chirurgie (CURAC)}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65339}, pages = {24 -- 30}, year = {2017}, abstract = {In this study we investigate methods for fitting a Statistical Shape Model (SSM) to intraoperatively acquired point cloud data from a surgical navigation system. We validate the fitted models against the pre-operatively acquired Magnetic Resonance Imaging (MRI) data from the same patients. We consider a cohort of 10 patients who underwent navigated total knee arthroplasty. As part of the surgical protocol the patients' distal femurs were partially digitized. All patients had an MRI scan two months pre-operatively. The MRI data were manually segmented and the reconstructed bone surfaces used as ground truth against which the fit was compared. Two methods were used to fit the SSM to the data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM). 
For both approaches, the difference between model fit and ground truth surface averaged less than 1.7 mm and excellent correspondence with the distal femoral morphology can be demonstrated.}, language = {en} } @misc{PapazovHege2017, author = {Papazov, Chavdar and Hege, Hans-Christian}, title = {Blue-noise Optimized Point Sets Based on Procrustes Analysis}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-65356}, year = {2017}, abstract = {In this paper, we propose a new method for optimizing the blue noise characteristics of point sets. It is based on Procrustes analysis, a technique for adjusting shapes to each other by applying optimal elements of an appropriate transformation group. We adapt this technique to the problem at hand and introduce a very simple, efficient and provably convergent point set optimizer.}, language = {en} } @misc{Reddy2017, type = {Master Thesis}, author = {Reddy, Gutha Vaishnavi}, title = {Automatic Classification of 3D MRI data using Deep Convolutional Neural Networks}, pages = {60}, year = {2017}, abstract = {Osteoarthritis of the knee, a chronic disease that causes pain and discomfort, is associated with the degradation of the joint between the tibia and the femur. The degeneration of this joint is attributed partially to the damage of the meniscus of the knee, which forms an important part of the knee joint. Magnetic Resonance Imaging (MRI) is used to diagnose such a kind of osteoarthritis by identifying the degeneration of the knee meniscus. A computer-aided diagnostic system that aims to assist a doctor in decision making regarding such a diagnosis can expedite the diagnosis itself. Diagnostic decision making for medical imaging falls into the category of classification for a computer vision task. Very Deep Convolutional Networks have been central to the largest advances in computer vision in recent years. This work entails the application of such convolutional networks for the purpose of recognizing a meniscus tear in MRI images, as a step towards developing a computer-aided diagnosis system for osteoarthritis. Consequently, state-of-the-art pre-trained image recognition networks, namely AlexNet, InceptionV3, VGG, ResNet, and Xception, were trained on MRI data of the knee meniscus to see if they work for the task of recognizing a tear. A comparison of their classification performance on MRI data was done. The best performing model was the fine-tuned InceptionV3 network, which achieved an accuracy close to 60\% for classifying 600 patients based on the presence or absence of a tear.}, language = {en} } @misc{AmbellanTackWilsonetal.2017, author = {Ambellan, Felix and Tack, Alexander and Wilson, Dave and Anglin, Carolyn and Lamecker, Hans and Zachow, Stefan}, title = {Evaluating two methods for Geometry Reconstruction from Sparse Surgical Navigation Data}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66052}, year = {2017}, abstract = {In this study we investigate methods for fitting a Statistical Shape Model (SSM) to intraoperatively acquired point cloud data from a surgical navigation system. We validate the fitted models against the pre-operatively acquired Magnetic Resonance Imaging (MRI) data from the same patients. We consider a cohort of 10 patients who underwent navigated total knee arthroplasty. As part of the surgical protocol the patients' distal femurs were partially digitized. All patients had an MRI scan two months pre-operatively.
The MRI data were manually segmented and the reconstructed bone surfaces used as ground truth against which the fit was compared. Two methods were used to fit the SSM to the data, based on (1) Iterative Closest Points (ICP) and (2) Gaussian Mixture Models (GMM). For both approaches, the difference between model fit and ground truth surface averaged less than 1.7 mm and excellent correspondence with the distal femoral morphology can be demonstrated.}, language = {en} } @misc{GreweSchreiber2017, author = {Grewe, Carl Martin and Schreiber, Lisa}, title = {Digital Image Archive. The Archiving and Coding of Emotions}, journal = {+ultra. Knowledge \& Gestaltung}, editor = {Doll, Nikola and Bredekamp, Horst and Sch{\"a}ffner, Wolfgang}, publisher = {Seemann Henschel}, pages = {281 -- 286}, year = {2017}, language = {en} } @misc{GreweZachow2017, author = {Grewe, Carl Martin and Zachow, Stefan}, title = {Face to Face-Interface}, journal = {+ultra. Knowledge \& Gestaltung}, editor = {Doll, Nikola and Bredekamp, Horst and Sch{\"a}ffner, Wolfgang}, publisher = {Seemann Henschel}, pages = {320 -- 321}, year = {2017}, language = {en} } @inproceedings{PapazovHege2017, author = {Papazov, Chavdar and Hege, Hans-Christian}, title = {Blue-noise Optimized Point Sets Based on Procrustes Analysis}, booktitle = {SIGGRAPH Asia 2017 Technical Briefs}, doi = {10.1145/3145749.3149442}, pages = {20:1 -- 20:4}, year = {2017}, language = {en} } @article{RathConradMylesetal.2017, author = {Rath, Barbara and Conrad, Tim and Myles, Puja and Alchikh, Maren and Ma, Xiaolin and Hoppe, Christian and Tief, Franziska and Chen, Xi and Obermeier, Patrick and Kisler, Bron and Schweiger, Brunhilde}, title = {Influenza and other respiratory viruses: standardizing disease severity in surveillance and clinical trials}, volume = {15}, journal = {Expert Review of Anti-infective Therapy}, number = {6}, doi = {10.1080/14787210.2017.1295847}, pages = {545 -- 568}, year = {2017}, abstract = {Introduction: Influenza-Like Illness is a leading cause of hospitalization in children. Disease burden due to influenza and other respiratory viral infections is reported on a population level, but clinical scores measuring individual changes in disease severity are urgently needed. Areas covered: We present a composite clinical score allowing individual patient data analyses of disease severity based on systematic literature review and WHO-criteria for uncomplicated and complicated disease. The 22-item ViVI Disease Severity Score showed a normal distribution in a pediatric cohort of 6073 children aged 0-18 years (mean age 3.13; S.D. 3.89; range: 0 to 18.79). Expert commentary: The ViVI Score was correlated with risk of antibiotic use as well as need for hospitalization and intensive care. The ViVI Score was used to track children with influenza, respiratory syncytial virus, human metapneumovirus, human rhinovirus, and adenovirus infections and is fully compliant with regulatory data standards. 
The ViVI Disease Severity Score mobile application allows physicians to measure disease severity at the point of care, thereby taking clinical trials to the next level.}, language = {en} } @article{ShaoCannistraciConrad2017, author = {Shao, Borong and Cannistraci, Carlo Vittorio and Conrad, Tim}, title = {Epithelial Mesenchymal Transition Network-Based Feature Engineering in Lung Adenocarcinoma Prognosis Prediction Using Multiple Omic Data}, volume = {3}, journal = {Genomics and Computational Biology}, number = {3}, doi = {10.18547/gcb.2017.vol3.iss3.e57}, pages = {e57}, year = {2017}, abstract = {The epithelial mesenchymal transition (EMT) process has been shown to be highly relevant to cancer prognosis. However, although different biological network-based biomarker identification methods have been proposed to predict cancer prognosis, the EMT network has not been directly used for this purpose. In this study, we constructed an EMT regulatory network consisting of 87 molecules and tried to select features that are useful for prognosis prediction in Lung Adenocarcinoma (LUAD). To incorporate multiple molecular profiles, we obtained four types of molecular data including mRNA-Seq, copy number alteration (CNA), DNA methylation, and miRNA-Seq data from The Cancer Genome Atlas. The data were mapped to the EMT network in three alternative ways: mRNA-Seq and miRNA-Seq, DNA methylation, and CNA and miRNA-Seq. Each mapping was employed to extract five different sets of features using discretization and network-based biomarker identification methods. Each feature set was then used to predict prognosis with SVM and logistic regression classifiers. We measured the prediction accuracy with AUC and AUPR values using 10 times 10-fold cross validation. For a more comprehensive evaluation, we also measured the prediction accuracies of clinical features, EMT plus clinical features, 87 randomly picked molecules from each data mapping, and using all molecules from each data type. Counter-intuitively, EMT features do not always outperform randomly selected features and the prediction accuracies of the five feature sets are mostly not significantly different. Clinical features are shown to give the highest prediction accuracies. In addition, the prediction accuracies of both EMT features and random features are comparable to using all features (more than 17,000) from each data type.}, language = {en} } @article{JayrannejadConrad2017, author = {Jayrannejad, Fahrnaz and Conrad, Tim}, title = {Better Interpretable Models for Proteomics Data Analysis Using rule-based Mining}, journal = {Springer Lecture Notes in Artificial Intelligence}, year = {2017}, abstract = {Recent advances in -omics technology have yielded large data-sets in many areas of biology, such as mass spectrometry based proteomics. However, analyzing this data is still a challenging task, mainly due to the very high dimensionality and high noise content of the data. One of the main objectives of the analysis is the identification of relevant patterns (or features) which can be used for classification of new samples as healthy or diseased. So, a method is required to find easily interpretable models from this data. To achieve the above-mentioned goal, we have adapted the disjunctive association rule mining algorithm, TitanicOR, to identify emerging patterns from our mass spectrometry proteomics data-sets.
Comparison to five state-of-the-art methods shows that our method is advantageous over them in terms of identifying the inter-dependency between the features and the TP-rate and precision of the features selected. We further demonstrate the applicability of our algorithm to one previously published clinical data-set.}, language = {en} } @article{MoldenhauerWeiserZachow2017, author = {Moldenhauer, Marian and Weiser, Martin and Zachow, Stefan}, title = {Adaptive Algorithms for Optimal Hip Implant Positioning}, volume = {17}, journal = {PAMM}, number = {1}, doi = {10.1002/pamm.201710071}, pages = {203 -- 204}, year = {2017}, abstract = {In an aging society where the number of joint replacements rises, it is important to also increase the longevity of implants. In particular, hip implants have a lifetime of at most 15 years. This derives primarily from pain due to implant migration, wear, inflammation, and dislocation, which is affected by the positioning of the implant during the surgery. Current joint replacement practice uses 2D software tools and relies on the experience of surgeons. The 2D tools in particular fail to take into account the patients' natural range of motion as well as the stress distribution in the 3D joint induced by different daily motions. Optimizing the hip joint implant position for all possible parametrized motions under the constraint of a contact problem is prohibitively expensive as there are too many motions and every position change demands a recalculation of the contact problem. For the reduction of the computational effort, we use adaptive refinement on the parameter domain coupled with the interpolation method of Kriging. A coarse initial grid is to be locally refined using goal-oriented error estimation, reducing locally high variances. This approach will be combined with multi-grid optimization such that numerical errors are reduced.}, language = {en} } @misc{KnoetelSeidelZaslanskyetal.2017, author = {Kn{\"o}tel, David and Seidel, Ronald and Zaslansky, Paul and Prohaska, Steffen and Dean, Mason N. and Baum, Daniel}, title = {Automated Segmentation of Complex Patterns in Biological Tissues: Lessons from Stingray Tessellated Cartilage (Supplementary Material)}, doi = {10.12752/4.DKN.1.0}, year = {2017}, abstract = {Supplementary data to reproduce and understand key results from the related publication, including original image data and processed data. In particular, sections from hyomandibulae harvested from specimens of round stingray Urobatis halleri, donated from another study (DOI: 10.1002/etc.2564). Specimens were from sub-adults/adults collected by beach seine from collection sites in San Diego and Seal Beach, California, USA. The hyomandibulae were mounted in clay, sealed in ethanol-humidified plastic tubes and scanned with a Skyscan 1172 desktop μCT scanner (Bruker μCT, Kontich, Belgium) in association with another study (DOI: 10.1111/joa.12508). Scans for all samples were performed with voxel sizes of 4.89 μm at 59 kV source voltage and 167 μA source current, over 360° sample rotation. For our segmentations, the datasets were resampled to a voxel size of 9.78 μm to reduce the size of the images and speed up processing.
In addition, the supplementary material includes the processed data that was generated from the mentioned specimens with the visualization software Amira, using techniques described in the related publication.}, language = {en} } @inproceedings{SeblanyHombergVincensetal.2017, author = {Seblany, Feda and Homberg, Ulrike and Vincens, Eric and Winkler, Paul and Witt, Karl Josef}, title = {Merging criteria for the definition of a local pore and the CSD computation of granular materials}, booktitle = {Proceedings of the 25th meeting of the Working Group on Internal Erosion in embankment dams and their foundations}, publisher = {Deltares}, address = {Delft, Netherlands}, isbn = {978-90-827468-1-5 | 978-90-827468-0-8}, pages = {150 -- 159}, year = {2017}, language = {en} } @inproceedings{SakuraiHegeKuhnetal.2017, author = {Sakurai, Daisuke and Hege, Hans-Christian and Kuhn, Alexander and Rust, Henning and Kern, Bastian and Breitkopf, Tom-Lukas}, title = {An Application-Oriented Framework for Feature Tracking in Atmospheric Sciences}, booktitle = {Proceedings of 2017 IEEE 7th Symposium on Large Data Analysis and Visualization (LDAV)}, doi = {10.1109/LDAV.2017.8231857}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-66685}, pages = {96 -- 97}, year = {2017}, abstract = {In atmospheric sciences, sizes of data sets grow continuously due to increasing resolutions. A central task is the comparison of spatiotemporal fields, to assess different simulations and to compare simulations with observations. A significant information reduction is possible by focusing on geometric-topological features of the fields or on derived meteorological objects. Due to the huge size of the data sets, spatial features have to be extracted in time slices and traced over time. Fields with a chaotic component, i.e. without 1:1 spatiotemporal correspondences, can be compared by looking at statistics of feature properties. Feature extraction, however, requires a clear mathematical definition of the features - which many meteorological objects still lack. Traditionally, object extractions are often heuristic, defined only by implemented algorithms, and thus are not comparable. This work surveys our framework designed for efficient development of feature tracking methods and for testing new feature definitions. The framework supports well-established visualization practices and is being used by atmospheric researchers to diagnose and compare data.}, language = {en} } @article{GuentherKuhnHegeetal.2017, author = {G{\"u}nther, Tobias and Kuhn, Alexander and Hege, Hans-Christian and Gross, Markus and Theisel, Holger}, title = {Progressive Monte Carlo rendering of atmospheric flow features across scales}, volume = {2}, journal = {Physical Review Fluids}, doi = {10.1103/PhysRevFluids.2.090502}, pages = {09050-1 -- 09050-3}, year = {2017}, abstract = {To improve existing weather prediction and reanalysis capabilities, high-resolution and multi-modal climate data are becoming increasingly important. The advent of increasingly dense numerical simulation of atmospheric phenomena provides new means to better understand dynamic processes and to visualize structural flow patterns that remain hidden otherwise. In the presented illustrations, we demonstrate an advanced technique to visualize multiple scales of dense flow fields and Lagrangian patterns therein, simulated by state-of-the-art simulation models for each scale.
They provide a deeper insight into the structural differences and patterns that occur on each scale and highlight the complexity of flow phenomena in our atmosphere. This paper is associated with a poster winner of a 2016 APS/DFD Milton van Dyke Award for work presented at the DFD Gallery of Fluid Motion. The original poster is available from the Gallery of Fluid Motion, https://doi.org/10.1103/APS.DFD.2016.GFM.P0030}, language = {en} } @misc{AboulhassanSicatBaumetal.2017, author = {Aboulhassan, Amal and Sicat, Ronell and Baum, Daniel and Wodo, Olga and Hadwiger, Markus}, title = {Comparative Visual Analysis of Structure-Performance Relations in Complex Bulk-Heterojunction Morphologies}, issn = {1438-0064}, url = {http://nbn-resolving.de/urn:nbn:de:0297-zib-63239}, year = {2017}, abstract = {The structure of Bulk-Heterojunction (BHJ) materials, the main component of organic photovoltaic solar cells, is very complex, and the relationship between structure and performance is still largely an open question. Overall, there is a wide spectrum of fabrication configurations resulting in different BHJ morphologies and correspondingly different performances. Current state-of-the-art methods for assessing the performance of BHJ morphologies are either based on global quantification of morphological features or simply on visual inspection of the morphology based on experimental imaging. This makes finding optimal BHJ structures very challenging. Moreover, finding the optimal fabrication parameters to get an optimal structure is still an open question. In this paper, we propose a visual analysis framework to help answer these questions through comparative visualization and parameter space exploration for local morphology features. With our approach, we enable scientists to explore multivariate correlations between local features and performance indicators of BHJ morphologies. Our framework is built on shape-based clustering of local cubical regions of the morphology that we call patches. This enables correlating the features of clusters with intuition-based performance indicators computed from geometrical and topological features of charge paths.}, language = {en} }