@article{MarzahlAubrevilleBertrametal.2021, author = {Marzahl, Christian and Aubreville, Marc and Bertram, Christof and Maier, Jennifer and Bergler, Christian and Kr{\"o}ger, Christine and Voigt, J{\"o}rn and Breininger, Katharina and Klopfleisch, Robert and Maier, Andreas}, title = {EXACT: a collaboration toolset for algorithm-aided annotation of images with annotation version control}, volume = {11}, pages = {4343}, journal = {Scientific Reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-021-83827-4}, year = {2021}, abstract = {In many research areas, scientific progress is accelerated by multidisciplinary access to image data and their interdisciplinary annotation. However, keeping track of these annotations to ensure a high-quality multi-purpose data set is a challenging and labour intensive task. We developed the open-source online platform EXACT (EXpert Algorithm Collaboration Tool) that enables the collaborative interdisciplinary analysis of images from different domains online and offline. EXACT supports multi-gigapixel medical whole slide images as well as image series with thousands of images. The software utilises a flexible plugin system that can be adapted to diverse applications such as counting mitotic figures with a screening mode, finding false annotations on a novel validation view, or using the latest deep learning image analysis technologies. This is combined with a version control system which makes it possible to keep track of changes in the data sets and, for example, to link the results of deep learning experiments to specific data set versions. EXACT is freely available and has already been successfully applied to a broad range of annotation tasks, including highly diverse applications like deep learning supported cytology scoring, interdisciplinary multi-centre whole slide image tumour annotation, and highly specialised whale sound spectroscopy clustering.}, language = {en} } @inproceedings{MarzahlAubrevilleBertrametal.2020, author = {Marzahl, Christian and Aubreville, Marc and Bertram, Christof and Gerlach, Stefan and Maier, Jennifer and Voigt, J{\"o}rn and Hill, Jenny and Klopfleisch, Robert and Maier, Andreas}, title = {Is crowd-algorithm collaboration an advanced alternative to crowd-sourcing on cytology slides?}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020, Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {https://doi.org/10.1007/978-3-658-29267-6_5}, pages = {26 -- 31}, year = {2020}, language = {en} } @article{AubrevilleKnipferOetteretal.2017, author = {Aubreville, Marc and Knipfer, Christian and Oetter, Nicolai and Jaremenko, Christian and Rodner, Erik and Denzler, Joachim and Bohr, Christopher and Neumann, Helmut and Stelzle, Florian and Maier, Andreas}, title = {Automatic classification of cancerous tissue in laserendomicroscopy images of the oral cavity using deep learning}, volume = {7}, pages = {11979}, journal = {Scientific Reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-017-12320-8}, year = {2017}, abstract = {Oral Squamous Cell Carcinoma (OSCC) is a common type of cancer of the oral epithelium. 
Despite their high impact on mortality, sufficient screening methods for early diagnosis of OSCC often lack accuracy and thus OSCCs are mostly diagnosed at a late stage. Early detection and accurate outline estimation of OSCCs would lead to a better curative outcome and a reduction in recurrence rates after surgical treatment. Confocal Laser Endomicroscopy (CLE) records sub-surface micro-anatomical images for in vivo cell structure analysis. Recent CLE studies showed great prospects for a reliable, real-time ultrastructural imaging of OSCC in situ. We present and evaluate a novel automatic approach for OSCC diagnosis using deep learning technologies on CLE images. The method is compared against textural feature-based machine learning approaches that represent the current state of the art. For this work, CLE image sequences (7894 images) from patients diagnosed with OSCC were obtained from 4 specific locations in the oral cavity, including the OSCC lesion. The present approach is found to outperform the state of the art in CLE image recognition with an area under the curve (AUC) of 0.96 and a mean accuracy of 88.3\% (sensitivity 86.6\%, specificity 90\%).}, language = {en} } @article{AubrevilleStoeveOetteretal.2019, author = {Aubreville, Marc and Stoeve, Maike and Oetter, Nicolai and Goncalves, Miguel and Knipfer, Christian and Neumann, Helmut and Bohr, Christopher and Stelzle, Florian and Maier, Andreas}, title = {Deep learning-based detection of motion artifacts in probe-based confocal laser endomicroscopy images}, volume = {14}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {1}, publisher = {Springer}, address = {Berlin}, issn = {1861-6429}, doi = {https://doi.org/10.1007/s11548-018-1836-1}, pages = {31 -- 42}, year = {2019}, language = {en} } @inproceedings{AubrevilleGoncalvesKnipferetal.2018, author = {Aubreville, Marc and Goncalves, Miguel and Knipfer, Christian and Oetter, Nicolai and W{\"u}rfl, Tobias and Neumann, Helmut and Stelzle, Florian and Bohr, Christopher and Maier, Andreas}, title = {Patch-based Carcinoma Detection on Confocal Laser Endomicroscopy Images}, booktitle = {Proceedings of the 11th International Joint Conference on Biomedical Engineering Systems and Technologies}, subtitle = {A Cross-Site Robustness Assessment}, editor = {Wiebe, Sheldon and Gamboa, Hugo and Fred, Ana and Berm{\'u}dez i Badia, Sergi}, publisher = {SciTePress}, address = {Set{\'u}bal}, isbn = {978-989-758-278-3}, doi = {https://doi.org/10.5220/0006534700270034}, pages = {27 -- 34}, year = {2018}, abstract = {Deep learning technologies such as convolutional neural networks (CNN) provide powerful methods for image recognition and have recently been employed in the field of automated carcinoma detection in confocal laser endomicroscopy (CLE) images. CLE is a (sub-)surface microscopic imaging technique that reaches magnifications of up to 1000x and is thus suitable for in vivo structural tissue analysis. In this work, we aim to evaluate the prospects of a priorly developed deep learning-based algorithm targeted at the identification of oral squamous cell carcinoma with regard to its generalization to further anatomic locations of squamous cell carcinomas in the area of head and neck. We applied the algorithm on images acquired from the vocal fold area of five patients with histologically verified squamous cell carcinoma and presumably healthy control images of the clinically normal contra-lateral vocal cord.
We find that the network trained on the oral cavity data reaches an accuracy of 89.45\% and an area-under-the-curve (AUC) value of 0.955, when applied on the vocal cords data. Compared to the state of the art, we achieve very similar results, yet with an algorithm that was trained on a completely disjunct data set. Concatenating both data sets yielded further improvements in cross-validation with an accuracy of 90.81\% and AUC of 0.970. In this study, for the first time to our knowledge, a deep learning mechanism for the identification of oral carcinomas using CLE images could be applied to other disciplines in the area of head and neck. This study shows the prospect of the algorithmic approach to generalize well on other malignant entities of the head and neck, regardless of the anatomical location and furthermore in an examiner-independent manner.}, language = {en} } @inproceedings{MarzahlAubrevilleVoigtetal.2019, author = {Marzahl, Christian and Aubreville, Marc and Voigt, J{\"o}rn and Maier, Andreas}, title = {Classification of leukemic B-lymphoblast cells from blood smear microscopic images with an attention-based deep learning method and advanced augmentation techniques}, booktitle = {ISBI 2019 C-NMC challenge: classification in cancer cell imaging}, publisher = {Springer}, address = {Singapore}, isbn = {978-981-15-0797-7}, issn = {2195-271X}, doi = {https://doi.org/10.1007/978-981-15-0798-4_2}, pages = {13 -- 22}, year = {2019}, language = {en} } @article{AubrevilleBertramDonovanetal.2020, author = {Aubreville, Marc and Bertram, Christof and Donovan, Taryn A. and Marzahl, Christian and Maier, Andreas and Klopfleisch, Robert}, title = {A completely annotated whole slide image dataset of canine breast cancer to aid human breast cancer research}, volume = {7}, pages = {417}, journal = {Scientific Data}, publisher = {Springer Nature}, address = {London}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-020-00756-z}, year = {2020}, abstract = {Canine mammary carcinoma (CMC) has been used as a model to investigate the pathogenesis of human breast cancer and the same grading scheme is commonly used to assess tumor malignancy in both. One key component of this grading scheme is the density of mitotic figures (MF). Current publicly available datasets on human breast cancer only provide annotations for small subsets of whole slide images (WSIs). We present a novel dataset of 21 WSIs of CMC completely annotated for MF. For this, a pathologist screened all WSIs for potential MF and structures with a similar appearance. A second expert blindly assigned labels, and for non-matching labels, a third expert assigned the final labels. Additionally, we used machine learning to identify previously undetected MF. Finally, we performed representation learning and two-dimensional projection to further increase the consistency of the annotations. Our dataset consists of 13,907 MF and 36,379 hard negatives.
We achieved a mean F1-score of 0.791 on the test set and of up to 0.696 on a human breast cancer dataset.}, language = {en} } @article{BertramAubrevilleMarzahletal.2019, author = {Bertram, Christof and Aubreville, Marc and Marzahl, Christian and Maier, Andreas and Klopfleisch, Robert}, title = {A large-scale dataset for mitotic figure assessment on whole slide images of canine cutaneous mast cell tumor}, volume = {6}, pages = {274}, journal = {Scientific Data}, publisher = {Springer Nature}, address = {London}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-019-0290-4}, year = {2019}, abstract = {We introduce a novel, large-scale dataset for microscopy cell annotations. The dataset includes 32 whole slide images (WSI) of canine cutaneous mast cell tumors, selected to include both low grade cases as well as high grade cases. The slides have been completely annotated for mitotic figures and we provide secondary annotations for neoplastic mast cells, inflammatory granulocytes, and mitotic figure look-alikes. In addition to a blinded two-expert manual annotation with consensus, we provide an algorithm-aided dataset, where potentially missed mitotic figures were detected by a deep neural network and subsequently assessed by two human experts. We included 262,481 annotations in total, out of which 44,880 represent mitotic figures. For algorithmic validation, we used a customized RetinaNet approach, followed by a cell classification network. We find F1-Scores of 0.786 and 0.820 for the manually labelled and the algorithm-aided dataset, respectively. The dataset provides, for the first time, WSIs completely annotated for mitotic figures and thus enables assessment of mitosis detection algorithms on complete WSIs as well as region of interest detection algorithms.}, language = {en} } @article{MarzahlAubrevilleBertrametal.2020a, author = {Marzahl, Christian and Aubreville, Marc and Bertram, Christof and Stayt, Jason and Jasensky, Anne-Katherine and Bartenschlager, Florian and Fragoso-Garcia, Marco and Barton, Ann K. and Elsemann, Svenja and Jabari, Samir and Krauth, Jens and Madhu, Prathmesh and Voigt, J{\"o}rn and Hill, Jenny and Klopfleisch, Robert and Maier, Andreas}, title = {Deep Learning-based quantification of pulmonary hemosiderophages in cytology slides}, volume = {10}, pages = {9795}, journal = {Scientific Reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-020-65958-2}, year = {2020}, abstract = {Exercise-induced pulmonary hemorrhage (EIPH) is a common condition in sport horses with negative impact on performance. Cytology of bronchoalveolar lavage fluid by use of a scoring system is considered the most sensitive diagnostic method. Macrophages are classified depending on the degree of cytoplasmic hemosiderin content. The current gold standard is manual grading, which is however monotonous and time-consuming. We evaluated state-of-the-art deep learning-based methods for single cell macrophage classification and compared them against the performance of nine cytology experts and evaluated inter- and intra-observer variability. Additionally, we evaluated object detection methods on a novel data set of 17 completely annotated cytology whole slide images (WSI) containing 78,047 hemosiderophages. Our deep learning-based approach reached a concordance of 0.85, partially exceeding human expert concordance (0.68 to 0.86, mean of 0.73, SD of 0.04).
Intra-observer variability was high (0.68 to 0.88) and inter-observer concordance was moderate (Fleiss' kappa = 0.67). Our object detection approach has a mean average precision of 0.66 over the five classes from the whole slide gigapixel image and a computation time of below two minutes. To mitigate the high inter- and intra-rater variability, we propose our automated object detection pipeline, enabling accurate, reproducible and quick EIPH scoring in WSI.}, language = {en} } @article{WilmIhlingMehesetal.2023, author = {Wilm, Frauke and Ihling, Christian and M{\'e}hes, G{\'a}bor and Terracciano, Luigi and Puget, Chlo{\'e} and Klopfleisch, Robert and Sch{\"u}ffler, Peter and Aubreville, Marc and Maier, Andreas and Mrowiec, Thomas and Breininger, Katharina}, title = {Pan-tumor T-lymphocyte detection using deep neural networks: Recommendations for transfer learning in immunohistochemistry}, volume = {14}, pages = {100301}, journal = {Journal of Pathology Informatics}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2153-3539}, doi = {https://doi.org/10.1016/j.jpi.2023.100301}, year = {2023}, abstract = {The success of immuno-oncology treatments promises long-term cancer remission for an increasing number of patients. The response to checkpoint inhibitor drugs has shown a correlation with the presence of immune cells in the tumor and tumor microenvironment. An in-depth understanding of the spatial localization of immune cells is therefore critical for understanding the tumor's immune landscape and predicting drug response. Computer-aided systems are well suited for efficiently quantifying immune cells in their spatial context. Conventional image analysis approaches are often based on color features and therefore require a high level of manual interaction. More robust image analysis methods based on deep learning are expected to decrease this reliance on human interaction and improve the reproducibility of immune cell scoring. However, these methods require sufficient training data and previous work has reported low robustness of these algorithms when they are tested on out-of-distribution data from different pathology labs or samples from different organs. In this work, we used a new image analysis pipeline to explicitly evaluate the robustness of marker-labeled lymphocyte quantification algorithms depending on the number of training samples before and after being transferred to a new tumor indication. For these experiments, we adapted the RetinaNet architecture for the task of T-lymphocyte detection and employed transfer learning to bridge the domain gap between tumor indications and reduce the annotation costs for unseen domains. On our test set, we achieved human-level performance for almost all tumor indications with an average precision of 0.74 in-domain and 0.72-0.74 cross-domain. From our results, we derive recommendations for model development regarding annotation extent, training sample selection, and label extraction for the development of robust algorithms for immune cell scoring.
By extending the task of marker-labeled lymphocyte quantification to a multi-class detection task, the prerequisite for subsequent analyses, e.g., distinguishing lymphocytes in the tumor stroma from tumor-infiltrating lymphocytes, is met.}, language = {en} } @inproceedings{MarzahlWilmTharunetal.2021, author = {Marzahl, Christian and Wilm, Frauke and Tharun, Lars and Perner, Sven and Kr{\"o}ger, Christine and Voigt, J{\"o}rn and Klopfleisch, Robert and Maier, Andreas and Aubreville, Marc and Breininger, Katharina}, title = {Robust quad-tree based registration on whole slide images}, booktitle = {Proceedings of Machine Learning Research: Proceedings of COMPAY 2021}, number = {156}, publisher = {PMLR}, address = {[s. l.]}, url = {https://proceedings.mlr.press/v156/marzahl21a.html}, pages = {181 -- 190}, year = {2021}, language = {en} } @article{AubrevilleBertramMarzahletal.2020, author = {Aubreville, Marc and Bertram, Christof and Marzahl, Christian and Gurtner, Corinne and Dettwiler, Martina and Schmidt, Anja and Bartenschlager, Florian and Merz, Sophie and Fragoso-Garcia, Marco and Kershaw, Olivia and Klopfleisch, Robert and Maier, Andreas}, title = {Deep learning algorithms out-perform veterinary pathologists in detecting the mitotically most active tumor region}, volume = {10}, pages = {16447}, journal = {Scientific Reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-020-73246-2}, year = {2020}, abstract = {Manual count of mitotic figures, which is determined in the tumor region with the highest mitotic activity, is a key parameter of most tumor grading schemes. It can be, however, strongly dependent on the area selection due to uneven mitotic figure distribution in the tumor section. We aimed to assess how significantly the area selection could impact the mitotic count, which has a known high inter-rater disagreement. On a data set of 32 whole slide images of H\&E-stained canine cutaneous mast cell tumor, fully annotated for mitotic figures, we asked eight veterinary pathologists (five board-certified, three in training) to select a field of interest for the mitotic count. To assess the potential difference on the mitotic count, we compared the mitotic count of the selected regions to the overall distribution on the slide. Additionally, we evaluated three deep learning-based methods for the assessment of highest mitotic density: In one approach, the model would directly try to predict the mitotic count for the presented image patches as a regression task. The second method aims at deriving a segmentation mask for mitotic figures, which is then used to obtain a mitotic density. Finally, we evaluated a two-stage object-detection pipeline based on state-of-the-art architectures to identify individual mitotic figures. We found that the predictions by all models were, on average, better than those of the experts. The two-stage object detector performed best and outperformed most of the human pathologists on the majority of tumor cases. The correlation between the predicted and the ground truth mitotic count was also best for this approach (0.963-0.979). Further, we found considerable differences in position selection between pathologists, which could partially explain the high variance that has been reported for the manual mitotic count.
To achieve better inter-rater agreement, we propose to use a computer-based area selection to support the pathologist in the manual mitotic count.}, language = {en} } @inproceedings{MarzahlBertramAubrevilleetal.2020, author = {Marzahl, Christian and Bertram, Christof and Aubreville, Marc and Petrick, Anne and Weiler, Kristina and Gl{\"a}sel, Agnes C. and Fragoso-Garcia, Marco and Merz, Sophie and Bartenschlager, Florian and Hoppe, Judith and Langenhagen, Alina and Jasensky, Anne-Katherine and Voigt, J{\"o}rn and Klopfleisch, Robert and Maier, Andreas}, title = {Are Fast Labeling Methods Reliable? A Case Study of Computer-Aided Expert Annotations on Microscopy Slides}, booktitle = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2020}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-59710-8}, issn = {1611-3349}, doi = {https://doi.org/10.1007/978-3-030-59710-8_3}, pages = {24 -- 32}, year = {2020}, language = {en} } @inproceedings{BertramVetaMarzahletal.2020, author = {Bertram, Christof and Veta, Mitko and Marzahl, Christian and Stathonikos, Nikolas and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Are Pathologist-Defined Labels Reproducible? Comparison of the TUPAC16 Mitotic Figure Dataset with an Alternative Set of Labels}, booktitle = {Interpretable and Annotation-Efficient Learning for Medical Image Computing}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-61166-8}, issn = {1611-3349}, doi = {https://doi.org/10.1007/978-3-030-61166-8_22}, pages = {204 -- 213}, year = {2020}, language = {en} } @inproceedings{StoeveAubrevilleOetteretal.2018, author = {Stoeve, Maike and Aubreville, Marc and Oetter, Nicolai and Knipfer, Christian and Neumann, Helmut and Stelzle, Florian and Maier, Andreas}, title = {Motion Artifact Detection in Confocal Laser Endomicroscopy Images}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018: Algorithmen - Systeme - Anwendungen}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Berlin}, isbn = {978-3-662-56537-7}, doi = {https://doi.org/10.1007/978-3-662-56537-7_85}, pages = {328 -- 333}, year = {2018}, language = {en} } @unpublished{MarzahlBertramAubrevilleetal.2020a, author = {Marzahl, Christian and Bertram, Christof and Aubreville, Marc and Petrick, Anne and Weiler, Kristina and Gl{\"a}sel, Agnes C. and Fragoso-Garcia, Marco and Merz, Sophie and Bartenschlager, Florian and Hoppe, Judith and Langenhagen, Alina and Jasensky, Anne-Katherine and Voigt, J{\"o}rn and Klopfleisch, Robert and Maier, Andreas}, title = {Are Fast Labeling Methods Reliable? A Case Study of Computer-Aided Expert Annotations on Microscopy Slides}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2004.05838}, year = {2020}, language = {en} } @unpublished{BertramVetaMarzahletal.2020a, author = {Bertram, Christof and Veta, Mitko and Marzahl, Christian and Stathonikos, Nikolas and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Are pathologist-defined labels reproducible?
Comparison of the TUPAC16 mitotic figure dataset with an alternative set of labels}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2007.05351}, year = {2020}, language = {en} } @unpublished{StoeveAubrevilleOetteretal.2018a, author = {Stoeve, Maike and Aubreville, Marc and Oetter, Nicolai and Knipfer, Christian and Neumann, Helmut and Stelzle, Florian and Maier, Andreas}, title = {Motion Artifact Detection in Confocal Laser Endomicroscopy Images}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.1711.01117}, year = {2018}, abstract = {Confocal Laser Endomicroscopy (CLE), an optical imaging technique allowing non-invasive examination of the mucosa on a (sub-)cellular level, has proven to be a valuable diagnostic tool in gastroenterology and shows promising results in various anatomical regions including the oral cavity. Recently, the feasibility of automatic carcinoma detection for CLE images of sufficient quality was shown. However, in real world data sets a high amount of CLE images is corrupted by artifacts. Amongst the most prevalent artifact types are motion-induced image deteriorations. In the scope of this work, algorithmic approaches for the automatic detection of motion artifact-tainted image regions were developed. Hence, this work provides an important step towards clinical applicability of automatic carcinoma detection. Both conventional machine learning and novel deep learning-based approaches were assessed. The deep learning-based approach outperforms the conventional approaches, attaining an AUC of 0.90.}, language = {en} } @thesis{Maier2021, author = {Maier, Christian}, title = {Carving von fragmentierten Dateien mithilfe von Neuralen Netzwerken}, publisher = {Technische Hochschule Ingolstadt}, address = {Ingolstadt}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:573-22714}, pages = {28}, year = {2021}, abstract = {File carving is used in data recovery and IT forensics to restore or locate lost or hidden files on storage media. Fragmented files, however, pose a major problem, because conventional tools cannot reliably read them from storage media of arbitrary size. In a fragmented file, the data of a single file is not stored contiguously on the medium but in at least two parts, and these parts need not appear on the medium in their original order. Many tools therefore fail to identify and order the parts that belong together so that the data becomes usable. Neural networks could be the solution for carving fragmented files, as they have proven to achieve low error rates on complicated problems. Nevertheless, neural networks might not cope with the large number of different file types encountered in practice, since the number of possible byte combinations could be too large to recognize patterns in the data. In my bachelor's thesis, I therefore investigate whether neural networks can be used to carve fragmented files reliably. First, I explain the fundamentals and theory of classical file carving and of artificial neural networks. Then, techniques for carving fragmented files are described to show where, in my estimation, neural networks could be useful for carving fragmented files. Finally, I explain the methodology used and present the associated results.}, language = {de} } @article{WilmFragosoGarciaMarzahletal.2022, author = {Wilm, Frauke and Fragoso-Garcia, Marco and Marzahl, Christian and Qiu, Jingna and Puget, Chlo{\'e} and Diehl, Laura and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas and Breininger, Katharina and Aubreville, Marc}, title = {Pan-tumor CAnine cuTaneous Cancer Histology (CATCH) dataset}, volume = {9}, pages = {588}, journal = {Scientific Data}, publisher = {Springer Nature}, address = {New York}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-022-01692-w}, year = {2022}, abstract = {Due to morphological similarities, the differentiation of histologic sections of cutaneous tumors into individual subtypes can be challenging. Recently, deep learning-based approaches have proven their potential for supporting pathologists in this regard. However, many of these supervised algorithms require a large amount of annotated data for robust development. We present a publicly available dataset of 350 whole slide images of seven different canine cutaneous tumors complemented by 12,424 polygon annotations for 13 histologic classes, including seven cutaneous tumor subtypes. In inter-rater experiments, we show a high consistency of the provided labels, especially for tumor annotations. We further validate the dataset by training a deep neural network for the task of tissue segmentation and tumor subtype classification. We achieve a class-averaged Jaccard coefficient of 0.7047, and 0.9044 for tumor in particular. For classification, we achieve a slide-level accuracy of 0.9857. Since canine cutaneous tumors possess various histologic homologies to human tumors, the added value of this dataset is not limited to veterinary pathology but extends to more general fields of application.}, language = {en} } @article{BertramAubrevilleDonovanetal.2021, author = {Bertram, Christof and Aubreville, Marc and Donovan, Taryn A. and Bartel, Alexander and Wilm, Frauke and Marzahl, Christian and Assenmacher, Charles-Antoine and Becker, Kathrin and Bennett, Mark and Corner, Sarah M. and Cossic, Brieuc and Denk, Daniela and Dettwiler, Martina and Garcia Gonzalez, Beatriz and Gurtner, Corinne and Haverkamp, Ann-Kathrin and Heier, Annabelle and Lehmbecker, Annika and Merz, Sophie and Noland, Erica L. and Plog, Stephanie and Schmidt, Anja and Sebastian, Franziska and Sledge, Dodd G. and Smedley, Rebecca C. and Tecilla, Marco and Thaiwong, Tuddow and Fuchs-Baumgartinger, Andrea and Meuten, Donald J. and Breininger, Katharina and Kiupel, Matti and Maier, Andreas and Klopfleisch, Robert}, title = {Computer-assisted mitotic count using a deep learning-based algorithm improves interobserver reproducibility and accuracy}, volume = {59}, journal = {Veterinary Pathology}, number = {2}, publisher = {SAGE Publications Inc}, address = {London}, issn = {1544-2217}, doi = {https://doi.org/10.1177/03009858211067478}, pages = {211 -- 226}, year = {2021}, abstract = {The mitotic count (MC) is an important histological parameter for prognostication of malignant neoplasms. However, it has inter- and intraobserver discrepancies due to difficulties in selecting the region of interest (MC-ROI) and in identifying or classifying mitotic figures (MFs).
Recent progress in the field of artificial intelligence has allowed the development of high-performance algorithms that may improve standardization of the MC. As algorithmic predictions are not flawless, computer-assisted review by pathologists may ensure reliability. In the present study, we compared partial (MC-ROI preselection) and full (additional visualization of MF candidates and display of algorithmic confidence values) computer-assisted MC analysis to the routine (unaided) MC analysis by 23 pathologists for whole-slide images of 50 canine cutaneous mast cell tumors (ccMCTs). Algorithmic predictions aimed to assist pathologists in detecting mitotic hotspot locations, reducing omission of MFs, and improving classification against imposters. The interobserver consistency for the MC significantly increased with computer assistance (interobserver correlation coefficient, ICC = 0.92) compared to the unaided approach (ICC = 0.70). Classification into prognostic stratifications had a higher accuracy with computer assistance. The algorithmically preselected hotspot MC-ROIs had a consistently higher MC than the manually selected MC-ROIs. Compared to a ground truth (developed with immunohistochemistry for phosphohistone H3), pathologist performance in detecting individual MF was augmented when using computer assistance (F1-score of 0.68 increased to 0.79) with a reduction in false negatives by 38\%. The results of this study demonstrate that computer assistance may lead to more reproducible and accurate MCs in ccMCTs.}, language = {en} } @article{MarzahlHillStaytetal.2022, author = {Marzahl, Christian and Hill, Jenny and Stayt, Jason and Bienzle, Dorothee and Welker, Lutz and Wilm, Frauke and Voigt, J{\"o}rn and Aubreville, Marc and Maier, Andreas and Klopfleisch, Robert and Breininger, Katharina and Bertram, Christof}, title = {Inter-species cell detection}, volume = {9}, pages = {269}, journal = {Scientific Data}, subtitle = {datasets on pulmonary hemosiderophages in equine, human and feline specimens}, publisher = {Springer Nature}, address = {London}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-022-01389-0}, year = {2022}, abstract = {Pulmonary hemorrhage (P-Hem) occurs among multiple species and can have various causes. Cytology of bronchoalveolar lavage fluid (BALF) using a 5-tier scoring system of alveolar macrophages based on their hemosiderin content is considered the most sensitive diagnostic method. We introduce a novel, fully annotated multi-species P-Hem dataset, which consists of 74 cytology whole slide images (WSIs) with equine, feline and human samples. To create this high-quality and high-quantity dataset, we developed an annotation pipeline combining human expertise with deep learning and data visualisation techniques. We applied a deep learning-based object detection approach trained on 17 expertly annotated equine WSIs to the remaining 39 equine, 12 human and 7 feline WSIs. The resulting annotations were semi-automatically screened for errors on multiple types of specialised annotation maps and finally reviewed by a trained pathologist. Our dataset contains a total of 297,383 hemosiderophages classified into five grades.
It is one of the largest publicly available WSI datasets with respect to the number of annotations, the scanned area and the number of species covered.}, language = {en} } @article{BertramMarzahlBarteletal.2022, author = {Bertram, Christof and Marzahl, Christian and Bartel, Alexander and Stayt, Jason and Bonsembiante, Federico and Beeler-Marfisi, Janet and Barton, Ann K. and Brocca, Ginevra and Gelain, Maria Elena and Gl{\"a}sel, Agnes C. and du Preez, Kelly and Weiler, Kristina and Weissenbacher-Lang, Christiane and Breininger, Katharina and Aubreville, Marc and Maier, Andreas and Klopfleisch, Robert and Hill, Jenny}, title = {Cytologic scoring of equine exercise-induced pulmonary hemorrhage}, volume = {60}, journal = {Veterinary Pathology}, subtitle = {Performance of human experts and a deep learning-based algorithm}, number = {1}, publisher = {Sage}, address = {London}, issn = {1544-2217}, doi = {https://doi.org/10.1177/03009858221137582}, pages = {75 -- 85}, year = {2022}, abstract = {Exercise-induced pulmonary hemorrhage (EIPH) is a relevant respiratory disease in sport horses, which can be diagnosed by examination of bronchoalveolar lavage fluid (BALF) cells using the total hemosiderin score (THS). The aim of this study was to evaluate the diagnostic accuracy and reproducibility of annotators and to validate a deep learning-based algorithm for the THS. Digitized cytological specimens stained for iron were prepared from 52 equine BALF samples. Ten annotators produced a THS for each slide according to published methods. The reference methods for comparing annotators' and algorithmic performance included a ground truth dataset, the mean annotators' THSs, and chemical iron measurements. Results of the study showed that annotators had marked interobserver variability of the THS, which was mostly due to a systematic error between annotators in grading the intracytoplasmatic hemosiderin content of individual macrophages. Regarding overall measurement error between the annotators, 87.7\% of the variance could be reduced by using standardized grades based on the ground truth. The algorithm was highly consistent with the ground truth in assigning hemosiderin grades. Compared with the ground truth THS, annotators had an accuracy of diagnosing EIPH (THS of < or ≥ 75) of 75.7\%, whereas the algorithm had an accuracy of 92.3\% with no relevant differences in correlation with chemical iron measurements. The results show that deep learning-based algorithms are useful for improving reproducibility and routine applicability of the THS. For THS by experts, a diagnostic uncertainty interval of 40 to 110 is proposed. THSs within this interval have insufficient reproducibility regarding the EIPH diagnosis.}, language = {en} } @inproceedings{WilmBertramMarzahletal.2021, author = {Wilm, Frauke and Bertram, Christof and Marzahl, Christian and Bartel, Alexander and Donovan, Taryn A. and Assenmacher, Charles-Antoine and Becker, Kathrin and Bennett, Mark and Corner, Sarah M. and Cossic, Brieuc and Denk, Daniela and Dettwiler, Martina and Garcia Gonzalez, Beatriz and Gurtner, Corinne and Heier, Annabelle and Lehmbecker, Annika and Merz, Sophie and Plog, Stephanie and Schmidt, Anja and Sebastian, Franziska and Smedley, Rebecca C.
and Tecilla, Marco and Thaiwong, Tuddow and Breininger, Katharina and Kiupel, Matti and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Influence of inter-annotator variability on automatic mitotic figure assessment}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-658-33198-6}, doi = {https://doi.org/10.1007/978-3-658-33198-6_56}, pages = {241 -- 246}, year = {2021}, language = {en} } @inproceedings{BertramDonovanTecillaetal.2021, author = {Bertram, Christof and Donovan, Taryn A. and Tecilla, Marco and Bartenschlager, Florian and Fragoso-Garcia, Marco and Wilm, Frauke and Marzahl, Christian and Breininger, Katharina and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Dataset on bi- and multi-nucleated tumor cells in canine cutaneous mast cell tumors}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021: Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, issn = {1431-472X}, doi = {https://doi.org/10.1007/978-3-658-33198-6_33}, pages = {134 -- 139}, year = {2021}, language = {en} } @inproceedings{MarzahlBertramWilmetal.2021, author = {Marzahl, Christian and Bertram, Christof and Wilm, Frauke and Voigt, J{\"o}rn and Barton, Ann K. and Klopfleisch, Robert and Breininger, Katharina and Maier, Andreas and Aubreville, Marc}, title = {Cell detection for asthma on partially annotated whole slide images}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021: Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, subtitle = {learning to be EXACT}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, issn = {1431-472X}, doi = {https://doi.org/10.1007/978-3-658-33198-6_36}, pages = {147 -- 152}, year = {2021}, language = {en} } @inproceedings{AubrevilleGoncalvesKnipferetal.2019, author = {Aubreville, Marc and Goncalves, Miguel and Knipfer, Christian and Oetter, Nicolai and W{\"u}rfl, Tobias and Neumann, Helmut and Stelzle, Florian and Bohr, Christopher and Maier, Andreas}, title = {Transferability of deep learning algorithms for malignancy detection in confocal laser endomicroscopy images from different anatomical locations of the upper gastrointestinal tract}, booktitle = {Biomedical Engineering Systems and Technologies}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-29195-2}, issn = {1865-0929}, doi = {https://doi.org/10.1007/978-3-030-29196-9_4}, pages = {67 -- 85}, year = {2019}, language = {en} } @inproceedings{TheelkeWilmMarzahletal.2021, author = {Theelke, Luisa and Wilm, Frauke and Marzahl, Christian and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas and Aubreville, Marc and Breininger, Katharina}, title = {Iterative Cross-Scanner Registration for Whole Slide Images}, booktitle = {2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-0191-3}, issn = {2473-9944}, doi = {https://doi.org/10.1109/ICCVW54120.2021.00071}, pages = {582 -- 590}, year = {2021}, language = {en} } @inproceedings{AubrevilleBertramJabarietal.2020, author = {Aubreville, Marc and Bertram, Christof and Jabari, Samir and Marzahl, Christian and Klopfleisch, Robert and Maier, Andreas}, title = {Inter-species, inter-tissue domain adaptation for mitotic figure assessment}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 
2020, Algorithmen - Systeme - Anwendungen. Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2020 in Berlin}, subtitle = {learning new tricks from old dogs}, editor = {Tolxdorff, Thomas and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, doi = {https://doi.org/10.1007/978-3-658-29267-6_1}, pages = {1 -- 7}, year = {2020}, language = {en} } @article{FragosoGarciaWilmBertrametal.2023, author = {Fragoso-Garcia, Marco and Wilm, Frauke and Bertram, Christof and Merz, Sophie and Schmidt, Anja and Donovan, Taryn A. and Fuchs-Baumgartinger, Andrea and Bartel, Alexander and Marzahl, Christian and Diehl, Laura and Puget, Chlo{\'e} and Maier, Andreas and Aubreville, Marc and Breininger, Katharina and Klopfleisch, Robert}, title = {Automated diagnosis of 7 canine skin tumors using machine learning on H\&E-stained whole slide images}, volume = {60}, journal = {Veterinary Pathology}, number = {6}, publisher = {SAGE}, address = {London}, issn = {0300-9858}, doi = {https://doi.org/10.1177/03009858231189205}, pages = {865 -- 875}, year = {2023}, abstract = {Microscopic evaluation of hematoxylin and eosin-stained slides is still the diagnostic gold standard for a variety of diseases, including neoplasms. Nevertheless, intra- and interrater variability are well documented among pathologists. So far, computer assistance via automated image analysis has shown potential to support pathologists in improving accuracy and reproducibility of quantitative tasks. In this proof of principle study, we describe a machine-learning-based algorithm for the automated diagnosis of 7 of the most common canine skin tumors: trichoblastoma, squamous cell carcinoma, peripheral nerve sheath tumor, melanoma, histiocytoma, mast cell tumor, and plasmacytoma. We selected, digitized, and annotated 350 hematoxylin and eosin-stained slides (50 per tumor type) to create a database divided into training (n = 245 whole-slide images, WSIs), validation (n = 35 WSIs), and test sets (n = 70 WSIs). Full annotations included the 7 tumor classes and 6 normal skin structures. The data set was used to train a convolutional neural network (CNN) for the automatic segmentation of tumor and nontumor classes. Subsequently, the detected tumor regions were classified patch-wise into 1 of the 7 tumor classes. A majority-of-patches approach led to a tumor classification accuracy of the network on the slide-level of 95\% (133/140 WSIs), with a patch-level precision of 85\%. The same 140 WSIs were provided to 6 experienced pathologists for diagnosis, who achieved a similar slide-level accuracy of 98\% (137/140 correct majority votes). Our results highlight the feasibility of artificial intelligence-based methods as a support tool in diagnostic oncologic pathology with future applications in other species and tumor types.}, language = {en} } @article{AubrevilleStathonikosBertrametal.2022, author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof and Klopfleisch, Robert and Hoeve, Natalie ter and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A. and Maier, Andreas and Breen, Jack and Ravikumar, Nishant and Chung, Youjin and Park, Jinah and Nateghi, Ramin and Pourakpour, Fattaneh and Fick, Rutger H. J. and Ben Hadj, Saima and Jahanifar, Mostafa and Shepard, Adam and Dexl, Jakob and Wittenberg, Thomas and Kondo, Satoshi and Lafarge, Maxime W. and Koelzer, Viktor H.
and Liang, Jingtang and Wang, Yubo and Long, Xi and Liu, Jingxin and Razavi, Salar and Khademi, April and Yang, Sen and Wang, Xiyue and Erber, Ramona and Klang, Andrea and Lipnik, Karoline and Bolfa, Pompei and Dark, Michael and Wasinger, Gabriel and Veta, Mitko and Breininger, Katharina}, title = {Mitosis domain generalization in histopathology images — The MIDOG challenge}, volume = {84}, pages = {102699}, journal = {Medical Image Analysis}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1361-8415}, doi = {https://doi.org/10.1016/j.media.2022.102699}, year = {2022}, language = {en} }