@article{BertramAubrevilleGurtneretal.2020, author = {Bertram, Christof and Aubreville, Marc and Gurtner, Corinne and Bartel, Alexander and Corner, Sarah M. and Dettwiler, Martina and Kershaw, Olivia and Noland, Erica L. and Schmidt, Anja and Sledge, Dodd G. and Smedley, Rebecca C. and Thaiwong, Tuddow and Kiupel, Matti and Maier, Andreas and Klopfleisch, Robert}, title = {Computerized calculation of mitotic count distribution in canine cutaneous mast cell tumor sections}, volume = {57}, journal = {Veterinary pathology}, subtitle = {mitotic count is area dependent}, number = {2}, publisher = {Sage}, address = {London}, issn = {1544-2217}, doi = {https://doi.org/10.1177/0300985819890686}, pages = {214 -- 226}, year = {2020}, language = {en} } @inproceedings{AubrevilleBertramKlopfleischetal.2018, author = {Aubreville, Marc and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas}, title = {SlideRunner}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018: Algorithmen - Systeme - Anwendungen}, subtitle = {a tool for massive cell annotations in whole slide images}, publisher = {Springer Vieweg}, address = {Berlin}, isbn = {978-3-662-56537-7}, doi = {https://doi.org/10.1007/978-3-662-56537-7_81}, pages = {309 -- 314}, year = {2018}, abstract = {Large-scale image data such as digital whole-slide histology images pose a challenging task for annotation software solutions. Today, a number of good solutions with varying scopes exist. For cell annotation, however, we find that many do not match the prerequisites for fast annotations. Especially in the field of mitosis detection, it is assumed that detection accuracy could significantly benefit from larger annotation databases, which are, however, currently very troublesome to produce. Further, multiple independent (blind) expert labels are a big asset for such databases, yet there is currently no tool for this kind of annotation available. To ease this tedious process of expert annotation and grading, we introduce SlideRunner, an open source annotation and visualization tool for digital histopathology, developed in close cooperation with two pathologists. SlideRunner is capable of setting annotations like object centers (e.g., for cells) as well as object boundaries (e.g., for tumor outlines). It provides single-click annotations as well as a blind mode for multi-annotations, where the expert is directly shown the microscopy image containing the cells that he has not yet rated.}, language = {en} } @inproceedings{AubrevilleKrappmannBertrametal.2017, author = {Aubreville, Marc and Krappmann, Maximilian and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas}, title = {A guided spatial transformer network for histology cell differentiation}, booktitle = {VCBM '17: Proceedings of the Eurographics Workshop on Visual Computing for Biology and Medicine}, publisher = {Eurographics Association}, address = {Goslar}, doi = {https://doi.org/10.2312/vcbm.20171233}, pages = {21 -- 25}, year = {2017}, language = {en} } @inproceedings{KrappmannAubrevilleMaieretal.2018, author = {Krappmann, Maximilian and Aubreville, Marc and Maier, Andreas and Bertram, Christof and Klopfleisch, Robert}, title = {Classification of Mitotic Cells}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2018}, subtitle = {Potentials Beyond the Limits of Small Data Sets}, editor = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H.
and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer}, address = {Berlin}, isbn = {978-3-662-56536-0}, doi = {https://doi.org/10.1007/978-3-662-56537-7_66}, pages = {245 -- 250}, year = {2018}, language = {en} } @article{AubrevilleBertramDonovanetal.2020, author = {Aubreville, Marc and Bertram, Christof and Donovan, Taryn A. and Marzahl, Christian and Maier, Andreas and Klopfleisch, Robert}, title = {A completely annotated whole slide image dataset of canine breast cancer to aid human breast cancer research}, volume = {7}, pages = {417}, journal = {Scientific data}, publisher = {Springer Nature}, address = {London}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-020-00756-z}, year = {2020}, abstract = {Canine mammary carcinoma (CMC) has been used as a model to investigate the pathogenesis of human breast cancer and the same grading scheme is commonly used to assess tumor malignancy in both. One key component of this grading scheme is the density of mitotic figures (MF). Current publicly available datasets on human breast cancer only provide annotations for small subsets of whole slide images (WSIs). We present a novel dataset of 21 WSIs of CMC completely annotated for MF. For this, a pathologist screened all WSIs for potential MF and structures with a similar appearance. A second expert blindly assigned labels, and for non-matching labels, a third expert assigned the final labels. Additionally, we used machine learning to identify previously undetected MF. Finally, we performed representation learning and two-dimensional projection to further increase the consistency of the annotations. Our dataset consists of 13,907 MF and 36,379 hard negatives. We achieved a mean F1-score of 0.791 on the test set and of up to 0.696 on a human breast cancer dataset.}, language = {en} } @article{BertramAubrevilleMarzahletal.2019, author = {Bertram, Christof and Aubreville, Marc and Marzahl, Christian and Maier, Andreas and Klopfleisch, Robert}, title = {A large-scale dataset for mitotic figure assessment on whole slide images of canine cutaneous mast cell tumor}, volume = {6}, pages = {274}, journal = {Scientific data}, publisher = {Springer Nature}, address = {London}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-019-0290-4}, year = {2019}, abstract = {We introduce a novel, large-scale dataset for microscopy cell annotations. The dataset includes 32 whole slide images (WSI) of canine cutaneous mast cell tumors, selected to include both low-grade and high-grade cases. The slides have been completely annotated for mitotic figures and we provide secondary annotations for neoplastic mast cells, inflammatory granulocytes, and mitotic figure look-alikes. In addition to a blinded two-expert manual annotation with consensus, we provide an algorithm-aided dataset, where potentially missed mitotic figures were detected by a deep neural network and subsequently assessed by two human experts. We included 262,481 annotations in total, out of which 44,880 represent mitotic figures. For algorithmic validation, we used a customized RetinaNet approach, followed by a cell classification network. We find F1-Scores of 0.786 and 0.820 for the manually labelled and the algorithm-aided dataset, respectively.
The dataset provides, for the first time, WSIs completely annotated for mitotic figures and thus enables assessment of mitosis detection algorithms on complete WSIs as well as region of interest detection algorithms.}, language = {en} } @article{BertramAubrevilleGurtneretal.2020a, author = {Bertram, Christof and Aubreville, Marc and Gurtner, Corinne and Bartel, Alexander and Corner, Sarah M. and Dettwiler, Martina and Kershaw, Olivia and Noland, Erica L. and Schmidt, Anja and Sledge, Dodd G. and Smedley, Rebecca C. and Thaiwong, Tuddow and Kiupel, Matti and Maier, Andreas and Klopfleisch, Robert}, title = {Mitotic count in canine cutaneous mast cell tumours}, volume = {174}, journal = {Journal of Comparative Pathology}, subtitle = {not accurate but reproducible}, publisher = {Elsevier}, address = {London}, issn = {1532-3129}, doi = {https://doi.org/10.1016/j.jcpa.2019.10.015}, pages = {143}, year = {2020}, language = {en} } @article{MarzahlAubrevilleBertrametal.2020a, author = {Marzahl, Christian and Aubreville, Marc and Bertram, Christof and Stayt, Jason and Jasensky, Anne-Katherine and Bartenschlager, Florian and Fragoso-Garcia, Marco and Barton, Ann K. and Elsemann, Svenja and Jabari, Samir and Krauth, Jens and Madhu, Prathmesh and Voigt, J{\"o}rn and Hill, Jenny and Klopfleisch, Robert and Maier, Andreas}, title = {Deep Learning-based quantification of pulmonary hemosiderophages in cytology slides}, volume = {10}, pages = {9795}, journal = {Scientific reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-020-65958-2}, year = {2020}, abstract = {Exercise-induced pulmonary hemorrhage (EIPH) is a common condition in sport horses with negative impact on performance. Cytology of bronchoalveolar lavage fluid by use of a scoring system is considered the most sensitive diagnostic method. Macrophages are classified depending on the degree of cytoplasmic hemosiderin content. The current gold standard is manual grading, which is however monotonous and time-consuming. We evaluated state-of-the-art deep learning-based methods for single cell macrophage classification and compared them against the performance of nine cytology experts and evaluated inter- and intra-observer variability. Additionally, we evaluated object detection methods on a novel data set of 17 completely annotated cytology whole slide images (WSI) containing 78,047 hemosiderophages. Our deep learning-based approach reached a concordance of 0.85, partially exceeding human expert concordance (0.68 to 0.86, mean of 0.73, SD of 0.04). Intra-observer variability was high (0.68 to 0.88) and inter-observer concordance was moderate (Fleiss' kappa = 0.67). Our object detection approach has a mean average precision of 0.66 over the five classes from the whole slide gigapixel image and a computation time of below two minutes. To mitigate the high inter- and intra-rater variability, we propose our automated object detection pipeline, enabling accurate, reproducible and quick EIPH scoring in WSI.}, language = {en} } @article{FragosoGarciaWilmBertrametal.2023, author = {Fragoso-Garcia, Marco and Wilm, Frauke and Bertram, Christof and Merz, Sophie and Schmidt, Anja and Donovan, Taryn A.
and Fuchs-Baumgartinger, Andrea and Bartel, Alexander and Marzahl, Christian and Diehl, Laura and Puget, Chlo{\'e} and Maier, Andreas and Aubreville, Marc and Breininger, Katharina and Klopfleisch, Robert}, title = {Automated diagnosis of 7 canine skin tumors using machine learning on H\&E-stained whole slide images}, volume = {60}, journal = {Veterinary Pathology}, number = {6}, publisher = {SAGE}, address = {London}, issn = {0300-9858}, doi = {https://doi.org/10.1177/03009858231189205}, pages = {865 -- 875}, year = {2023}, abstract = {Microscopic evaluation of hematoxylin and eosin-stained slides is still the diagnostic gold standard for a variety of diseases, including neoplasms. Nevertheless, intra- and interrater variability are well documented among pathologists. So far, computer assistance via automated image analysis has shown potential to support pathologists in improving accuracy and reproducibility of quantitative tasks. In this proof of principle study, we describe a machine-learning-based algorithm for the automated diagnosis of 7 of the most common canine skin tumors: trichoblastoma, squamous cell carcinoma, peripheral nerve sheath tumor, melanoma, histiocytoma, mast cell tumor, and plasmacytoma. We selected, digitized, and annotated 350 hematoxylin and eosin-stained slides (50 per tumor type) to create a database divided into training (n = 245 whole-slide images (WSIs)), validation (n = 35 WSIs), and test sets (n = 70 WSIs). Full annotations included the 7 tumor classes and 6 normal skin structures. The data set was used to train a convolutional neural network (CNN) for the automatic segmentation of tumor and nontumor classes. Subsequently, the detected tumor regions were classified patch-wise into 1 of the 7 tumor classes. A majority-of-patches approach led to a tumor classification accuracy of the network on the slide level of 95\% (133/140 WSIs), with a patch-level precision of 85\%. The same 140 WSIs were provided to 6 experienced pathologists for diagnosis, who achieved a similar slide-level accuracy of 98\% (137/140 correct majority votes). Our results highlight the feasibility of artificial intelligence-based methods as a support tool in diagnostic oncologic pathology with future applications in other species and tumor types.}, language = {en} } @article{AubrevilleBertramMarzahletal.2020, author = {Aubreville, Marc and Bertram, Christof and Marzahl, Christian and Gurtner, Corinne and Dettwiler, Martina and Schmidt, Anja and Bartenschlager, Florian and Merz, Sophie and Fragoso-Garcia, Marco and Kershaw, Olivia and Klopfleisch, Robert and Maier, Andreas}, title = {Deep learning algorithms out-perform veterinary pathologists in detecting the mitotically most active tumor region}, volume = {10}, pages = {16447}, journal = {Scientific reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-020-73246-2}, year = {2020}, abstract = {Manual count of mitotic figures, which is determined in the tumor region with the highest mitotic activity, is a key parameter of most tumor grading schemes. It can be, however, strongly dependent on the area selection due to uneven mitotic figure distribution in the tumor section. We aimed to assess how significantly the area selection could impact the mitotic count, which has a known high inter-rater disagreement.
On a data set of 32 whole slide images of H\&E-stained canine cutaneous mast cell tumor, fully annotated for mitotic figures, we asked eight veterinary pathologists (five board-certified, three in training) to select a field of interest for the mitotic count. To assess the potential difference on the mitotic count, we compared the mitotic count of the selected regions to the overall distribution on the slide. Additionally, we evaluated three deep learning-based methods for the assessment of highest mitotic density: In one approach, the model would directly try to predict the mitotic count for the presented image patches as a regression task. The second method aims at deriving a segmentation mask for mitotic figures, which is then used to obtain a mitotic density. Finally, we evaluated a two-stage object-detection pipeline based on state-of-the-art architectures to identify individual mitotic figures. We found that the predictions by all models were, on average, better than those of the experts. The two-stage object detector performed best and outperformed most of the human pathologists on the majority of tumor cases. The correlation between the predicted and the ground truth mitotic count was also best for this approach (0.963-0.979). Further, we found considerable differences in position selection between pathologists, which could partially explain the high variance that has been reported for the manual mitotic count. To achieve better inter-rater agreement, we propose to use a computer-based area selection for support of the pathologist in the manual mitotic count.}, language = {en} } @inproceedings{MarzahlBertramAubrevilleetal.2020, author = {Marzahl, Christian and Bertram, Christof and Aubreville, Marc and Petrick, Anne and Weiler, Kristina and Gl{\"a}sel, Agnes C. and Fragoso-Garcia, Marco and Merz, Sophie and Bartenschlager, Florian and Hoppe, Judith and Langenhagen, Alina and Jasensky, Anne-Katherine and Voigt, J{\"o}rn and Klopfleisch, Robert and Maier, Andreas}, title = {Are Fast Labeling Methods Reliable? A Case Study of Computer-Aided Expert Annotations on Microscopy Slides}, booktitle = {Medical Image Computing and Computer Assisted Intervention - MICCAI 2020}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-59710-8}, issn = {1611-3349}, doi = {https://doi.org/10.1007/978-3-030-59710-8_3}, pages = {24 -- 32}, year = {2020}, language = {en} } @inproceedings{BertramVetaMarzahletal.2020, author = {Bertram, Christof and Veta, Mitko and Marzahl, Christian and Stathonikos, Nikolas and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Are Pathologist-Defined Labels Reproducible? 
Comparison of the TUPAC16 Mitotic Figure Dataset with an Alternative Set of Labels}, booktitle = {Interpretable and Annotation-Efficient Learning for Medical Image Computing}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-61166-8}, issn = {1611-3349}, doi = {https://doi.org/10.1007/978-3-030-61166-8_22}, pages = {204 -- 213}, year = {2020}, language = {en} } @inproceedings{AubrevilleBertramKlopfleischetal.2019a, author = {Aubreville, Marc and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas}, title = {Field of Interest Proposal for Augmented Mitotic Cell Count}, volume = {2}, booktitle = {Proceedings of the 12th International Joint Conference on Biomedical Engineering Systems and Technologies - BIOIMAGING}, subtitle = {Comparison of Two Convolutional Networks}, publisher = {SciTePress}, address = {Set{\'u}bal}, isbn = {978-989-758-353-7}, issn = {2184-4305}, doi = {https://doi.org/10.5220/0007365700300037}, pages = {30 -- 37}, year = {2019}, language = {en} } @unpublished{MarzahlBertramAubrevilleetal.2020a, author = {Marzahl, Christian and Bertram, Christof and Aubreville, Marc and Petrick, Anne and Weiler, Kristina and Gl{\"a}sel, Agnes C. and Fragoso-Garcia, Marco and Merz, Sophie and Bartenschlager, Florian and Hoppe, Judith and Langenhagen, Alina and Jasensky, Anne-Katherine and Voigt, J{\"o}rn and Klopfleisch, Robert and Maier, Andreas}, title = {Are Fast Labeling Methods Reliable? A Case Study of Computer-Aided Expert Annotations on Microscopy Slides}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2004.05838}, year = {2020}, language = {en} } @unpublished{BertramVetaMarzahletal.2020a, author = {Bertram, Christof and Veta, Mitko and Marzahl, Christian and Stathonikos, Nikolas and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Are pathologist-defined labels reproducible? Comparison of the TUPAC16 mitotic figure dataset with an alternative set of labels}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2007.05351}, year = {2020}, language = {en} } @inproceedings{GanzBertramKlopfleischetal.2022, author = {Ganz, Jonathan and Bertram, Christof and Klopfleisch, Robert and Jabari, Samir and Breininger, Katharina and Aubreville, Marc}, title = {Classification of visibility in multi-stain microscopy images}, booktitle = {Medical Imaging with Deep Learning: MIDL 2022 Short Papers}, url = {https://openreview.net/forum?id=-GsA-mUVmm}, year = {2022}, language = {en} } @unpublished{HaghoferParlakBarteletal.2023, author = {Haghofer, Andreas and Parlak, Eda and Bartel, Alexander and Donovan, Taryn A. and Assenmacher, Charles-Antoine and Bolfa, Pompei and Dark, Michael J. and Fuchs-Baumgartinger, Andrea and Klang, Andrea and J{\"a}ger, Kathrin and Klopfleisch, Robert and Merz, Sophie and Richter, Barbara and Schulman, F. Yvonne and Ganz, Jonathan and Scharinger, Josef and Aubreville, Marc and Winkler, Stephan M. and Kiupel, Matti and Bertram, Christof}, title = {Nuclear Morphometry using a Deep Learning-based Algorithm has Prognostic Relevance for Canine Cutaneous Mast Cell Tumors}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2309.15031}, year = {2023}, abstract = {Variation in nuclear size and shape is an important criterion of malignancy for many tumor types; however, categorical estimates by pathologists have poor reproducibility. Measurements of nuclear characteristics (morphometry) can improve reproducibility, but manual methods are time consuming.
In this study, we evaluated fully automated morphometry using a deep learning-based algorithm in 96 canine cutaneous mast cell tumors with information on patient survival. Algorithmic morphometry was compared with karyomegaly estimates by 11 pathologists, manual nuclear morphometry of 12 cells by 9 pathologists, and the mitotic count as a benchmark. The prognostic value of automated morphometry was high with an area under the ROC curve regarding the tumor-specific survival of 0.943 (95\% CI: 0.889 - 0.996) for the standard deviation (SD) of nuclear area, which was higher than manual morphometry of all pathologists combined (0.868, 95\% CI: 0.737 - 0.991) and the mitotic count (0.885, 95\% CI: 0.765 - 1.00). At the proposed thresholds, the hazard ratio for algorithmic morphometry (SD of nuclear area ≥ 9.0 μm²) was 18.3 (95\% CI: 5.0 - 67.1), for manual morphometry (SD of nuclear area ≥ 10.9 μm²) 9.0 (95\% CI: 6.0 - 13.4), for karyomegaly estimates 7.6 (95\% CI: 5.7 - 10.1), and for the mitotic count 30.5 (95\% CI: 7.8 - 118.0). Inter-rater reproducibility for karyomegaly estimates was fair (κ = 0.226) with highly variable sensitivity/specificity values for the individual pathologists. Reproducibility for manual morphometry (SD of nuclear area) was good (ICC = 0.654). This study supports the use of algorithmic morphometry as a prognostic test to overcome the limitations of estimates and manual measurements.}, language = {en} } @article{WilmFragosoGarciaMarzahletal.2022, author = {Wilm, Frauke and Fragoso-Garcia, Marco and Marzahl, Christian and Qiu, Jingna and Puget, Chlo{\'e} and Diehl, Laura and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas and Breininger, Katharina and Aubreville, Marc}, title = {Pan-tumor CAnine cuTaneous Cancer Histology (CATCH) dataset}, volume = {9}, pages = {588}, journal = {Scientific Data}, publisher = {Springer Nature}, address = {New York}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-022-01692-w}, year = {2022}, abstract = {Due to morphological similarities, the differentiation of histologic sections of cutaneous tumors into individual subtypes can be challenging. Recently, deep learning-based approaches have proven their potential for supporting pathologists in this regard. However, many of these supervised algorithms require a large amount of annotated data for robust development. We present a publicly available dataset of 350 whole slide images of seven different canine cutaneous tumors complemented by 12,424 polygon annotations for 13 histologic classes, including seven cutaneous tumor subtypes. In inter-rater experiments, we show a high consistency of the provided labels, especially for tumor annotations. We further validate the dataset by training a deep neural network for the task of tissue segmentation and tumor subtype classification. We achieve a class-averaged Jaccard coefficient of 0.7047, and 0.9044 for tumor in particular. For classification, we achieve a slide-level accuracy of 0.9857. Since canine cutaneous tumors possess various histologic homologies to human tumors, the added value of this dataset is not limited to veterinary pathology but extends to more general fields of application.}, language = {en} } @article{BertramAubrevilleDonovanetal.2021, author = {Bertram, Christof and Aubreville, Marc and Donovan, Taryn A. and Bartel, Alexander and Wilm, Frauke and Marzahl, Christian and Assenmacher, Charles-Antoine and Becker, Kathrin and Bennett, Mark and Corner, Sarah M.
and Cossic, Brieuc and Denk, Daniela and Dettwiler, Martina and Garcia Gonzalez, Beatriz and Gurtner, Corinne and Haverkamp, Ann-Kathrin and Heier, Annabelle and Lehmbecker, Annika and Merz, Sophie and Noland, Erica L. and Plog, Stephanie and Schmidt, Anja and Sebastian, Franziska and Sledge, Dodd G. and Smedley, Rebecca C. and Tecilla, Marco and Thaiwong, Tuddow and Fuchs-Baumgartinger, Andrea and Meuten, Donald J. and Breininger, Katharina and Kiupel, Matti and Maier, Andreas and Klopfleisch, Robert}, title = {Computer-assisted mitotic count using a deep learning-based algorithm improves interobserver reproducibility and accuracy}, volume = {59}, journal = {Veterinary Pathology}, number = {2}, publisher = {SAGE Publications Inc}, address = {London}, issn = {1544-2217}, doi = {https://doi.org/10.1177/03009858211067478}, pages = {211 -- 226}, year = {2021}, abstract = {The mitotic count (MC) is an important histological parameter for prognostication of malignant neoplasms. However, it has inter- and intraobserver discrepancies due to difficulties in selecting the region of interest (MC-ROI) and in identifying or classifying mitotic figures (MFs). Recent progress in the field of artificial intelligence has allowed the development of high-performance algorithms that may improve standardization of the MC. As algorithmic predictions are not flawless, computer-assisted review by pathologists may ensure reliability. In the present study, we compared partial (MC-ROI preselection) and full (additional visualization of MF candidates and display of algorithmic confidence values) computer-assisted MC analysis to the routine (unaided) MC analysis by 23 pathologists for whole-slide images of 50 canine cutaneous mast cell tumors (ccMCTs). Algorithmic predictions aimed to assist pathologists in detecting mitotic hotspot locations, reducing omission of MFs, and improving classification against imposters. The interobserver consistency for the MC significantly increased with computer assistance (interobserver correlation coefficient, ICC = 0.92) compared to the unaided approach (ICC = 0.70). Classification into prognostic stratifications had a higher accuracy with computer assistance. The algorithmically preselected hotspot MC-ROIs had consistently higher MCs than the manually selected MC-ROIs. Compared to a ground truth (developed with immunohistochemistry for phosphohistone H3), pathologist performance in detecting individual MF was augmented when using computer assistance (F1-score of 0.68 increased to 0.79) with a reduction in false negatives by 38\%. The results of this study demonstrate that computer assistance may lead to more reproducible and accurate MCs in ccMCTs.}, language = {en} } @article{MarzahlAubrevilleBertrametal.2021, author = {Marzahl, Christian and Aubreville, Marc and Bertram, Christof and Maier, Jennifer and Bergler, Christian and Kr{\"o}ger, Christine and Voigt, J{\"o}rn and Breininger, Katharina and Klopfleisch, Robert and Maier, Andreas}, title = {EXACT: a collaboration toolset for algorithm-aided annotation of images with annotation version control}, volume = {11}, pages = {4343}, journal = {Scientific Reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-021-83827-4}, year = {2021}, abstract = {In many research areas, scientific progress is accelerated by multidisciplinary access to image data and their interdisciplinary annotation.
However, keeping track of these annotations to ensure a high-quality multi-purpose data set is a challenging and labour intensive task. We developed the open-source online platform EXACT (EXpert Algorithm Collaboration Tool) that enables the collaborative interdisciplinary analysis of images from different domains online and offline. EXACT supports multi-gigapixel medical whole slide images as well as image series with thousands of images. The software utilises a flexible plugin system that can be adapted to diverse applications such as counting mitotic figures with a screening mode, finding false annotations on a novel validation view, or using the latest deep learning image analysis technologies. This is combined with a version control system which makes it possible to keep track of changes in the data sets and, for example, to link the results of deep learning experiments to specific data set versions. EXACT is freely available and has already been successfully applied to a broad range of annotation tasks, including highly diverse applications like deep learning supported cytology scoring, interdisciplinary multi-centre whole slide image tumour annotation, and highly specialised whale sound spectroscopy clustering.}, language = {en} } @unpublished{AubrevilleStathonikosDonovanetal.2023, author = {Aubreville, Marc and Stathonikos, Nikolas and Donovan, Taryn A. and Klopfleisch, Robert and Ganz, Jonathan and Ammeling, Jonas and Wilm, Frauke and Veta, Mitko and Jabari, Samir and Eckstein, Markus and Annuscheit, Jonas and Krumnow, Christian and Bozaba, Engin and Cayir, Sercan and Gu, Hongyan and Chen, Xiang and Jahanifar, Mostafa and Shephard, Adam and Kondo, Satoshi and Kasai, Satoshi and Kotte, Sujatha and Saipradeep, Vangala and Lafarge, Maxime W. and Koelzer, Viktor H. and Wang, Ziyue and Zhang, Yongbing and Yang, Sen and Wang, Xiyue and Breininger, Katharina and Bertram, Christof}, title = {Domain generalization across tumor types, laboratories, and species - Insights from the 2022 edition of the Mitosis Domain Generalization Challenge}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2309.15589}, year = {2023}, abstract = {Recognition of mitotic figures in histologic tumor specimens is highly relevant to patient outcome assessment. This task is challenging for algorithms and human experts alike, with deterioration of algorithmic performance under shifts in image representations. Considerable covariate shifts occur when assessment is performed on different tumor types, images are acquired using different digitization devices, or specimens are produced in different laboratories. This observation motivated the inception of the 2022 challenge on MItosis Domain Generalization (MIDOG 2022). The challenge provided annotated histologic tumor images from six different domains and evaluated the algorithmic approaches for mitotic figure detection provided by nine challenge participants on ten independent domains. Ground truth for mitotic figure detection was established in two ways: a three-expert consensus and an independent, immunohistochemistry-assisted set of labels. This work represents an overview of the challenge tasks, the algorithmic strategies employed by the participants, and potential factors contributing to their success. With an F1 score of 0.764 for the top-performing team, we summarize that domain generalization across various tumor domains is possible with today's deep learning-based recognition pipelines. 
When assessed against the immunohistochemistry-assisted reference standard, all methods resulted in reduced recall scores, but with only minor changes in the order of participants in the ranking.}, language = {en} } @article{HaghoferFuchsBaumgartingerLipniketal.2023, author = {Haghofer, Andreas and Fuchs-Baumgartinger, Andrea and Lipnik, Karoline and Klopfleisch, Robert and Aubreville, Marc and Scharinger, Josef and Weissenb{\"o}ck, Herbert and Winkler, Stephan M. and Bertram, Christof}, title = {Histological classification of canine and feline lymphoma using a modular approach based on deep learning and advanced image processing}, volume = {13}, pages = {19436}, journal = {Scientific Reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-023-46607-w}, year = {2023}, abstract = {Histopathological examination of tissue samples is essential for identifying tumor malignancy and the diagnosis of different types of tumor. In the case of lymphoma classification, nuclear size of the neoplastic lymphocytes is one of the key features to differentiate the different subtypes. Based on the combination of artificial intelligence and advanced image processing, we provide a workflow for the classification of lymphoma with regards to their nuclear size (small, intermediate, and large). As the baseline for our workflow testing, we use a Unet++ model trained on histological images of canine lymphoma with individually labeled nuclei. As an alternative to the Unet++, we also used a publicly available pre-trained and unmodified instance segmentation model called Stardist to demonstrate that our modular classification workflow can be combined with different types of segmentation models if they can provide proper nuclei segmentation. Subsequent to nuclear segmentation, we optimize algorithmic parameters for accurate classification of nuclear size using a newly derived reference size and final image classification based on a pathologists-derived ground truth. Our image classification module achieves a classification accuracy of up to 92\% on canine lymphoma data. Compared to the accuracy ranging from 66.67 to 84\% achieved using measurements provided by three individual pathologists, our algorithm provides a higher accuracy level and reproducible results. Our workflow also demonstrates a high transferability to feline lymphoma, as shown by its accuracy of up to 84.21\%, even though our workflow was not optimized for feline lymphoma images. By determining the nuclear size distribution in tumor areas, our workflow can assist pathologists in subtyping lymphoma based on the nuclei size and potentially improve reproducibility. Our proposed approach is modular and comprehensible, thus allowing adaptation for specific tasks and increasing the users' trust in computer-assisted image classification.}, language = {en} } @unpublished{AmmelingHeckerGanzetal.2023, author = {Ammeling, Jonas and Hecker, Moritz and Ganz, Jonathan and Donovan, Taryn A. and Klopfleisch, Robert and Bertram, Christof and Breininger, Katharina and Aubreville, Marc}, title = {Automated Volume Corrected Mitotic Index Calculation Through Annotation-Free Deep Learning using Immunohistochemistry as Reference Standard}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2311.08949}, year = {2023}, abstract = {The volume-corrected mitotic index (M/V-Index) was shown to provide prognostic value in invasive breast carcinomas.
However, despite its prognostic significance, it is not established as the standard method for assessing aggressive biological behaviour, due to the high additional workload associated with determining the epithelial proportion. In this work, we show that using a deep learning pipeline solely trained with an annotation-free, immunohistochemistry-based approach provides accurate estimations of epithelial segmentation in canine breast carcinomas. We compare our automatic framework with the manually annotated M/V-Index in a study with three board-certified pathologists. Our results indicate that the deep learning-based pipeline shows expert-level performance, while providing time efficiency and reproducibility.}, language = {en} } @article{MarzahlHillStaytetal.2022, author = {Marzahl, Christian and Hill, Jenny and Stayt, Jason and Bienzle, Dorothee and Welker, Lutz and Wilm, Frauke and Voigt, J{\"o}rn and Aubreville, Marc and Maier, Andreas and Klopfleisch, Robert and Breininger, Katharina and Bertram, Christof}, title = {Inter-species cell detection}, volume = {9}, pages = {269}, journal = {Scientific Data}, subtitle = {datasets on pulmonary hemosiderophages in equine, human and feline specimens}, publisher = {Springer Nature}, address = {London}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-022-01389-0}, year = {2022}, abstract = {Pulmonary hemorrhage (P-Hem) occurs among multiple species and can have various causes. Cytology of bronchoalveolar lavage fluid (BALF) using a 5-tier scoring system of alveolar macrophages based on their hemosiderin content is considered the most sensitive diagnostic method. We introduce a novel, fully annotated multi-species P-Hem dataset, which consists of 74 cytology whole slide images (WSIs) with equine, feline and human samples. To create this high-quality and high-quantity dataset, we developed an annotation pipeline combining human expertise with deep learning and data visualisation techniques. We applied a deep learning-based object detection approach trained on 17 expertly annotated equine WSIs to the remaining 39 equine, 12 human and 7 feline WSIs. The resulting annotations were semi-automatically screened for errors on multiple types of specialised annotation maps and finally reviewed by a trained pathologist. Our dataset contains a total of 297,383 hemosiderophages classified into five grades. It is one of the largest publicly available WSI datasets with respect to the number of annotations, the scanned area and the number of species covered.}, language = {en} } @unpublished{PugetGanzOstermaieretal.2024, author = {Puget, Chlo{\'e} and Ganz, Jonathan and Ostermaier, Julian and Konrad, Thomas and Parlak, Eda and Bertram, Christof and Kiupel, Matti and Breininger, Katharina and Aubreville, Marc and Klopfleisch, Robert}, title = {Deep Learning model predicts the c-Kit-11 mutational status of canine cutaneous mast cell tumors by HE stained histological slides}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2401.06169}, year = {2024}, abstract = {Numerous prognostic factors are currently assessed histopathologically in biopsies of canine mast cell tumors to evaluate clinical behavior. In addition, PCR analysis of the c-Kit exon 11 mutational status is often performed to evaluate the potential success of a tyrosine kinase inhibitor therapy.
This project aimed at training deep learning models (DLMs) to identify the c-Kit-11 mutational status of MCTs solely based on morphology without additional molecular analysis. HE slides of 195 mutated and 173 non-mutated tumors were stained consecutively in two different laboratories and scanned with three different slide scanners. This resulted in six different datasets (stain-scanner variations) of whole slide images. DLMs were trained with single and mixed datasets and their performance was assessed under scanner and staining domain shifts. The DLMs correctly classified HE slides according to their c-Kit 11 mutation status in, on average, 87\% of cases for the best-suited stain-scanner variant. A relevant performance drop could be observed when the stain-scanner combination of the training and test dataset differed. Multi-variant datasets improved the average accuracy but did not reach the maximum accuracy of algorithms trained and tested on the same stain-scanner variant. In summary, DLM-assisted morphological examination of MCTs can predict c-Kit-exon 11 mutational status of MCTs with high accuracy. However, the recognition performance is impeded by a change of scanner or staining protocol. Larger data sets with higher numbers of scans originating from different laboratories and scanners may lead to more robust DLMs to identify c-Kit mutations in HE slides.}, language = {en} } @article{BertramMarzahlBarteletal.2022, author = {Bertram, Christof and Marzahl, Christian and Bartel, Alexander and Stayt, Jason and Bonsembiante, Federico and Beeler-Marfisi, Janet and Barton, Ann K. and Brocca, Ginevra and Gelain, Maria Elena and Gl{\"a}sel, Agnes C. and du Preez, Kelly and Weiler, Kristina and Weissenbacher-Lang, Christiane and Breininger, Katharina and Aubreville, Marc and Maier, Andreas and Klopfleisch, Robert and Hill, Jenny}, title = {Cytologic scoring of equine exercise-induced pulmonary hemorrhage}, volume = {60}, journal = {Veterinary Pathology}, subtitle = {Performance of human experts and a deep learning-based algorithm}, number = {1}, publisher = {Sage}, address = {London}, issn = {1544-2217}, doi = {https://doi.org/10.1177/03009858221137582}, pages = {75 -- 85}, year = {2022}, abstract = {Exercise-induced pulmonary hemorrhage (EIPH) is a relevant respiratory disease in sport horses, which can be diagnosed by examination of bronchoalveolar lavage fluid (BALF) cells using the total hemosiderin score (THS). The aim of this study was to evaluate the diagnostic accuracy and reproducibility of annotators and to validate a deep learning-based algorithm for the THS. Digitized cytological specimens stained for iron were prepared from 52 equine BALF samples. Ten annotators produced a THS for each slide according to published methods. The reference methods for comparing annotators' and algorithmic performance included a ground truth dataset, the mean annotators' THSs, and chemical iron measurements. Results of the study showed that annotators had marked interobserver variability of the THS, which was mostly due to a systematic error between annotators in grading the intracytoplasmatic hemosiderin content of individual macrophages. Regarding overall measurement error between the annotators, 87.7\% of the variance could be reduced by using standardized grades based on the ground truth. The algorithm was highly consistent with the ground truth in assigning hemosiderin grades.
Compared with the ground truth THS, annotators had an accuracy of diagnosing EIPH (THS of < or ≥ 75) of 75.7\%, whereas, the algorithm had an accuracy of 92.3\% with no relevant differences in correlation with chemical iron measurements. The results show that deep learning-based algorithms are useful for improving reproducibility and routine applicability of the THS. For THS by experts, a diagnostic uncertainty interval of 40 to 110 is proposed. THSs within this interval have insufficient reproducibility regarding the EIPH diagnosis.}, language = {en} } @inproceedings{WilmBertramMarzahletal.2021, author = {Wilm, Frauke and Bertram, Christof and Marzahl, Christian and Bartel, Alexander and Donovan, Taryn A. and Assenmacher, Charles-Antoine and Becker, Kathrin and Bennett, Mark and Corner, Sarah M. and Cossic, Brieuc and Denk, Daniela and Dettwiler, Martina and Garcia Gonzalez, Beatriz and Gurtner, Corinne and Heier, Annabelle and Lehmbecker, Annika and Merz, Sophie and Plog, Stephanie and Schmidt, Anja and Sebastian, Franziska and Smedley, Rebecca C. and Tecilla, Marco and Thaiwong, Tuddow and Breininger, Katharina and Kiupel, Matti and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Influence of inter-annotator variability on automatic mitotic figure assessment}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021}, publisher = {Springer}, address = {Wiesbaden}, isbn = {978-3-658-33198-6}, doi = {https://doi.org/10.1007/978-3-658-33198-6_56}, pages = {241 -- 246}, year = {2021}, language = {en} } @inproceedings{BertramDonovanTecillaetal.2021, author = {Bertram, Christof and Donovan, Taryn A. and Tecilla, Marco and Bartenschlager, Florian and Fragoso-Garcia, Marco and Wilm, Frauke and Marzahl, Christian and Breininger, Katharina and Maier, Andreas and Klopfleisch, Robert and Aubreville, Marc}, title = {Dataset on bi- and multi-nucleated tumor cells in canine cutaneous mast cell tumors}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021: Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, issn = {1431-472X}, doi = {https://doi.org/10.1007/978-3-658-33198-6_33}, pages = {134 -- 139}, year = {2021}, language = {en} } @inproceedings{MarzahlBertramWilmetal.2021, author = {Marzahl, Christian and Bertram, Christof and Wilm, Frauke and Voigt, J{\"o}rn and Barton, Ann K. 
and Klopfleisch, Robert and Breininger, Katharina and Maier, Andreas and Aubreville, Marc}, title = {Cell detection for asthma on partially annotated whole slide images}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021: Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, subtitle = {learning to be EXACT}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, issn = {1431-472X}, doi = {https://doi.org/10.1007/978-3-658-33198-6_36}, pages = {147 -- 152}, year = {2021}, language = {en} } @inproceedings{AubrevilleBertramKlopfleischetal.2019, author = {Aubreville, Marc and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas}, title = {Augmented mitotic cell count using field of interest proposal}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2019}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-25325-7}, issn = {1431-472X}, doi = {https://doi.org/10.1007/978-3-658-25326-4_71}, pages = {321 -- 326}, year = {2019}, language = {en} } @article{MeutenMooreDonovanetal.2021, author = {Meuten, Donald J. and Moore, Frances M. and Donovan, Taryn A. and Bertram, Christof and Klopfleisch, Robert and Foster, Robert A. and Smedley, Rebecca C. and Dark, Michael J. and Milovancev, Milan and Stromberg, Paul and Williams, Bruce H. and Aubreville, Marc and Avallone, Giancarlo and Bolfa, Pompei and Cullen, John and Dennis, Michelle M. and Goldschmidt, Michael and Luong, Richard and Miller, Andrew D. and Miller, Margaret A. and Munday, John S. and Roccabianca, Paola and Salas, Elisa N. and Schulman, F. Yvonne and Laufer-Amorim, Renee and Asakawa, Midori G. and Craig, Linden and Dervisis, Nick and Esplin, D. Glen and George, Jeanne W. and Hauck, Marlene and Kagawa, Yumiko and Kiupel, Matti and Linder, Keith and Meichner, Kristina and Marconato, Laura and Oblak, Michelle L. and Santos, Renato L. and Simpson, R. Mark and Tvedten, Harold and Whitley, Derick}, title = {International guidelines for veterinary tumor pathology}, volume = {58}, journal = {Veterinary pathology}, subtitle = {a call to action}, number = {5}, publisher = {Sage}, address = {London}, issn = {1544-2217}, doi = {https://doi.org/10.1177/03009858211013712}, pages = {766 -- 794}, year = {2021}, language = {en} } @article{DonovanMooreBertrametal.2021, author = {Donovan, Taryn A. and Moore, Frances M. and Bertram, Christof and Luong, Richard and Bolfa, Pompei and Klopfleisch, Robert and Tvedten, Harold and Salas, Elisa N. 
and Whitley, Derick and Aubreville, Marc and Meuten, Donald J.}, title = {Mitotic figures - normal, atypical, and imposters}, volume = {58}, journal = {Veterinary pathology}, subtitle = {a guide to identification}, number = {2}, publisher = {Sage}, address = {London}, issn = {1544-2217}, doi = {https://doi.org/10.1177/0300985820980049}, pages = {243 -- 257}, year = {2021}, language = {en} } @inproceedings{MarzahlAubrevilleBertrametal.2020, author = {Marzahl, Christian and Aubreville, Marc and Bertram, Christof and Gerlach, Stefan and Maier, Jennifer and Voigt, J{\"o}rn and Hill, Jenny and Klopfleisch, Robert and Maier, Andreas}, title = {Is crowd-algorithm collaboration an advanced alternative to crowd-sourcing on cytology slides?}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, issn = {1431-472X}, doi = {https://doi.org/10.1007/978-3-658-29267-6_5}, pages = {26 -- 31}, year = {2020}, language = {en} } @inproceedings{AubrevilleBertramJabarietal.2020, author = {Aubreville, Marc and Bertram, Christof and Jabari, Samir and Marzahl, Christian and Klopfleisch, Robert and Maier, Andreas}, title = {Inter-species, inter-tissue domain adaptation for mitotic figure assessment}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2020}, subtitle = {learning new tricks from old dogs}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-29266-9}, issn = {1431-472X}, doi = {https://doi.org/10.1007/978-3-658-29267-6_1}, pages = {1 -- 7}, year = {2020}, language = {en} } @inproceedings{TheelkeWilmMarzahletal.2021, author = {Theelke, Luisa and Wilm, Frauke and Marzahl, Christian and Bertram, Christof and Klopfleisch, Robert and Maier, Andreas and Aubreville, Marc and Breininger, Katharina}, title = {Iterative Cross-Scanner Registration for Whole Slide Images}, booktitle = {2021 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-0191-3}, issn = {2473-9944}, doi = {https://doi.org/10.1109/ICCVW54120.2021.00071}, pages = {582 -- 590}, year = {2021}, language = {en} } @inproceedings{LausserBertramKlopfleischetal.2023, author = {Lausser, Ludwig and Bertram, Christof and Klopfleisch, Robert and Aubreville, Marc}, title = {Limits of Human Expert Ensembles in Mitosis Multi-expert Ground Truth Generation}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2023: Proceedings, German Workshop on Medical Image Computing, Braunschweig, July 2-4, 2023}, editor = {Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. 
and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-41657-7}, doi = {https://doi.org/10.1007/978-3-658-41657-7_27}, pages = {116 -- 121}, year = {2023}, language = {en} } @inproceedings{AmmelingMangerKwakaetal.2023, author = {Ammeling, Jonas and Manger, Carina and Kwaka, Elias and Kr{\"u}gel, Sebastian and Uhl, Matthias and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Riener, Andreas and Bertram, Christof and Breininger, Katharina and Aubreville, Marc}, title = {Appealing but Potentially Biasing - Investigation of the Visual Representation of Segmentation Predictions by AI Recommender Systems for Medical Decision Making}, booktitle = {Mensch und Computer 2023: Building Bridges: Tagungsband (Proceedings)}, editor = {Stolze, Markus and Loch, Frieder and Baldauf, Matthias and Alt, Florian and Schneegass, Christina and Kosch, Thomas and Hirzle, Teresa and Sadeghian, Shadan and Draxler, Fiona and Bektas, Kenan and Lohan, Katrin and Knierim, Pascal}, publisher = {ACM}, address = {New York}, isbn = {979-8-4007-0771-1}, doi = {https://doi.org/10.1145/3603555.3608561}, pages = {330 -- 335}, year = {2023}, language = {en} } @inproceedings{GanzLipnikAmmelingetal.2023, author = {Ganz, Jonathan and Lipnik, Karoline and Ammeling, Jonas and Richter, Barbara and Puget, Chlo{\'e} and Parlak, Eda and Diehl, Laura and Klopfleisch, Robert and Donovan, Taryn A. and Kiupel, Matti and Bertram, Christof and Breininger, Katharina and Aubreville, Marc}, title = {Deep Learning-based Automatic Assessment of AgNOR-scores in Histopathology Images}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2023: Proceedings, German Workshop on Medical Image Computing, Braunschweig, July 2-4, 2023}, editor = {Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-41657-7}, doi = {https://doi.org/10.1007/978-3-658-41657-7_49}, pages = {226 -- 231}, year = {2023}, language = {en} } @inproceedings{AubrevilleGanzAmmelingetal.2023, author = {Aubreville, Marc and Ganz, Jonathan and Ammeling, Jonas and Donovan, Taryn A. and Fick, Rutger H. J. and Breininger, Katharina and Bertram, Christof}, title = {Deep Learning-based Subtyping of Atypical and Normal Mitoses using a Hierarchical Anchor-free Object Detector}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2023: Proceedings, German Workshop on Medical Image Computing, Braunschweig, July 2-4, 2023}, editor = {Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-41657-7}, doi = {https://doi.org/10.1007/978-3-658-41657-7_40}, pages = {189 -- 195}, year = {2023}, language = {en} } @article{AubrevilleStathonikosBertrametal.2022, author = {Aubreville, Marc and Stathonikos, Nikolas and Bertram, Christof and Klopfleisch, Robert and ter Hoeve, Natalie and Ciompi, Francesco and Wilm, Frauke and Marzahl, Christian and Donovan, Taryn A. and Maier, Andreas and Breen, Jack and Ravikumar, Nishant and Chung, Youjin and Park, Jinah and Nateghi, Ramin and Pourakpour, Fattaneh and Fick, Rutger H. J. and Ben Hadj, Saima and Jahanifar, Mostafa and Shephard, Adam and Dexl, Jakob and Wittenberg, Thomas and Kondo, Satoshi and Lafarge, Maxime W. and Koelzer, Viktor H.
and Liang, Jingtang and Wang, Yubo and Long, Xi and Liu, Jingxin and Razavi, Salar and Khademi, April and Yang, Sen and Wang, Xiyue and Erber, Ramona and Klang, Andrea and Lipnik, Karoline and Bolfa, Pompei and Dark, Michael J. and Wasinger, Gabriel and Veta, Mitko and Breininger, Katharina}, title = {Mitosis domain generalization in histopathology images — The MIDOG challenge}, volume = {84}, pages = {102699}, journal = {Medical Image Analysis}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1361-8415}, doi = {https://doi.org/10.1016/j.media.2022.102699}, year = {2022}, language = {en} } @article{BertramKlopfleischBarteletal.2022, author = {Bertram, Christof and Klopfleisch, Robert and Bartel, Alexander and Donovan, Taryn A. and Fuchs-Baumgartinger, Andrea and Breininger, Katharina and Kiupel, Matti and Aubreville, Marc}, title = {Expert Review of Algorithmic Mitotic Count Predictions Ensures High Reliability}, volume = {191}, journal = {Journal of Comparative Pathology}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0021-9975}, doi = {https://doi.org/10.1016/j.jcpa.2021.11.027}, pages = {12}, year = {2022}, language = {en} } @inproceedings{WilmFragosoGarciaBertrametal.2023, author = {Wilm, Frauke and Fragoso-Garcia, Marco and Bertram, Christof and Stathonikos, Nikolas and {\"O}ttl, Mathias and Qiu, Jingna and Klopfleisch, Robert and Maier, Andreas and Aubreville, Marc and Breininger, Katharina}, title = {Mind the Gap: Scanner-Induced Domain Shifts Pose Challenges for Representation Learning in Histopathology}, booktitle = {2023 IEEE 20th International Symposium on Biomedical Imaging (ISBI)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-7358-3}, doi = {https://doi.org/10.1109/ISBI53787.2023.10230458}, year = {2023}, language = {en} } @unpublished{WilmFragosoGarciaBertrametal.2022, author = {Wilm, Frauke and Fragoso-Garcia, Marco and Bertram, Christof and Stathonikos, Nikolas and {\"O}ttl, Mathias and Qiu, Jingna and Klopfleisch, Robert and Maier, Andreas and Aubreville, Marc and Breininger, Katharina}, title = {Mind the Gap: Scanner-induced domain shifts pose challenges for representation learning in histopathology}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2211.16141}, year = {2022}, language = {en} } @article{AubrevilleWilmStathonikosetal.2023, author = {Aubreville, Marc and Wilm, Frauke and Stathonikos, Nikolas and Breininger, Katharina and Donovan, Taryn A. and Jabari, Samir and Veta, Mitko and Ganz, Jonathan and Ammeling, Jonas and van Diest, Paul J. and Klopfleisch, Robert and Bertram, Christof}, title = {A comprehensive multi-domain dataset for mitotic figure detection}, volume = {10}, pages = {484}, journal = {Scientific Data}, publisher = {Springer Nature}, address = {London}, issn = {2052-4463}, doi = {https://doi.org/10.1038/s41597-023-02327-4}, year = {2023}, abstract = {The prognostic value of mitotic figures in tumor tissue is well-established for many tumor types and automating this task is of high research interest. However, especially deep learning-based methods face performance deterioration in the presence of domain shifts, which may arise from different tumor types, slide preparation and digitization devices. We introduce the MIDOG++ dataset, an extension of the MIDOG 2021 and 2022 challenge datasets.
We provide region of interest images from 503 histological specimens of seven different tumor types with variable morphology and, in total, labels for 11,937 mitotic figures: breast carcinoma, lung carcinoma, lymphosarcoma, neuroendocrine tumor, cutaneous mast cell tumor, cutaneous melanoma, and (sub)cutaneous soft tissue sarcoma. The specimens were processed in several laboratories utilizing diverse scanners. We evaluated the extent of the domain shift by using state-of-the-art approaches, observing notable differences in single-domain training. In a leave-one-domain-out setting, generalizability improved considerably. This mitotic figure dataset is the first that incorporates a wide domain shift based on different tumor types, laboratories, whole slide image scanners, and species.}, language = {en} } @unpublished{AubrevilleGanzAmmelingetal.2024, author = {Aubreville, Marc and Ganz, Jonathan and Ammeling, Jonas and Kaltenecker, Christopher C. and Bertram, Christof}, title = {Model-based Cleaning of the QUILT-1M Pathology Dataset for Text-Conditional Image Synthesis}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2404.07676}, year = {2024}, abstract = {The QUILT-1M dataset is the first openly available dataset containing images harvested from various online sources. While it provides a huge data variety, the image quality and composition are highly heterogeneous, impacting its utility for text-conditional image synthesis. We propose an automatic pipeline that provides predictions of the most common impurities within the images, e.g., visibility of narrators, desktop environment and pathology software, or text within the image. Additionally, we propose to use semantic alignment filtering of the image-text pairs. Our findings demonstrate that by rigorously filtering the dataset, there is a substantial enhancement of image fidelity in text-to-image tasks.}, language = {en} }