@article{ArribasAntonelliFrazzonietal., author = {Arribas, Julia and Antonelli, Giulio and Frazzoni, Leonardo and Fuccio, Lorenzo and Ebigbo, Alanna and van der Sommen, Fons and Ghatwary, Noha and Palm, Christoph and Coimbra, Miguel and Renna, Francesco and Bergman, Jacques J.G.H.M. and Sharma, Prateek and Messmann, Helmut and Hassan, Cesare and Dinis-Ribeiro, Mario J.}, title = {Standalone performance of artificial intelligence for upper GI neoplasia: a meta-analysis}, series = {Gut}, volume = {70}, journal = {Gut}, number = {8}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2020-321922}, pages = {1458 -- 1468}, abstract = {Objective: Artificial intelligence (AI) may reduce underdiagnosed or overlooked upper GI (UGI) neoplastic and preneoplastic conditions, due to subtle appearance and low disease prevalence. Only disease-specific AI performances have been reported, generating uncertainty on its clinical value. Design: We searched PubMed, Embase and Scopus until July 2020, for studies on the diagnostic performance of AI in detection and characterisation of UGI lesions. Primary outcomes were pooled diagnostic accuracy, sensitivity and specificity of AI. Secondary outcomes were pooled positive (PPV) and negative (NPV) predictive values. We calculated pooled proportion rates (\%), designed summary receiver operating characteristic curves with respective areas under the curve (AUCs) and performed metaregression and sensitivity analysis. Results: Overall, 19 studies on detection of oesophageal squamous cell neoplasia (ESCN) or Barrett's esophagus-related neoplasia (BERN) or gastric adenocarcinoma (GCA) were included with 218, 445, 453 patients and 7976, 2340, 13 562 images, respectively. AI-sensitivity/specificity/PPV/NPV/positive likelihood ratio/negative likelihood ratio for UGI neoplasia detection were 90\% (CI 85\% to 94\%)/89\% (CI 85\% to 92\%)/87\% (CI 83\% to 91\%)/91\% (CI 87\% to 94\%)/8.2 (CI 5.7 to 11.7)/0.111 (CI 0.071 to 0.175), respectively, with an overall AUC of 0.95 (CI 0.93 to 0.97). No difference in AI performance across ESCN, BERN and GCA was found, AUC being 0.94 (CI 0.52 to 0.99), 0.96 (CI 0.95 to 0.98), 0.93 (CI 0.83 to 0.99), respectively. Overall, study quality was low, with high risk of selection bias. No significant publication bias was found. Conclusion: We found a high overall AI accuracy for the diagnosis of any neoplastic lesion of the UGI tract that was independent of the underlying condition. This may be expected to substantially reduce the miss rate of precancerous lesions and early cancer when implemented in clinical practice.}, language = {en} } @article{EbigboMendelRueckertetal., author = {Ebigbo, Alanna and Mendel, Robert and R{\"u}ckert, Tobias and Schuster, Laurin and Probst, Andreas and Manzeneder, Johannes and Prinz, Friederike and Mende, Matthias and Steinbr{\"u}ck, Ingo and Faiss, Siegbert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Deprez, Pierre and Oyama, Tsuneo and Takahashi, Akiko and Seewald, Stefan and Sharma, Prateek and Byrne, Michael F.
and Palm, Christoph and Messmann, Helmut}, title = {Endoscopic prediction of submucosal invasion in Barrett's cancer with the use of Artificial Intelligence: A Pilot Study}, series = {Endoscopy}, volume = {53}, journal = {Endoscopy}, number = {09}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/a-1311-8570}, pages = {878 -- 883}, abstract = {Background and aims: The accurate differentiation between T1a and T1b Barrett's cancer has both therapeutic and prognostic implications but is challenging even for experienced physicians. We trained an Artificial Intelligence (AI) system on the basis of deep artificial neural networks (deep learning) to differentiate between T1a and T1b Barrett's cancer in white-light images. Methods: Endoscopic images from three tertiary care centres in Germany were collected retrospectively. A deep learning system was trained and tested using the principles of cross-validation. A total of 230 white-light endoscopic images (108 T1a and 122 T1b) were evaluated with the AI system. For comparison, the images were also classified by experts specialized in endoscopic diagnosis and treatment of Barrett's cancer. Results: The sensitivity, specificity, F1 and accuracy of the AI system in the differentiation between T1a and T1b cancer lesions were 0.77, 0.64, 0.73 and 0.71, respectively. There was no statistically significant difference between the performance of the AI system and that of human experts with sensitivity, specificity, F1 and accuracy of 0.63, 0.78, 0.67 and 0.70, respectively. Conclusion: This pilot study demonstrates the first multicenter application of an AI-based system in the prediction of submucosal invasion in endoscopic images of Barrett's cancer. AI scored on a par with international experts in the field, but more work is necessary to improve the system and apply it to video sequences and in a real-life setting. Nevertheless, the correct prediction of submucosal invasion in Barrett's cancer remains challenging for both experts and AI.}, subject = {Maschinelles Lernen}, language = {en} } @article{SouzaJrPassosMendeletal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A. and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Assisting Barrett's esophagus identification using endoscopic data augmentation based on Generative Adversarial Networks}, series = {Computers in Biology and Medicine}, volume = {126}, journal = {Computers in Biology and Medicine}, number = {November}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2020.104029}, pages = {12}, abstract = {Barrett's esophagus has seen a swift rise in the number of cases in recent years. Although traditional diagnostic methods play a vital role in early-stage treatment, they are generally time- and resource-consuming. In this context, computer-aided approaches for automatic diagnosis emerged in the literature since early detection is intrinsically related to remission probabilities. However, they still suffer from drawbacks because of the lack of available data for machine learning purposes, thus implying reduced recognition rates. This work introduces Generative Adversarial Networks to generate high-quality endoscopic images, thereby identifying Barrett's esophagus and adenocarcinoma more precisely. Further, Convolutional Neural Networks are used for feature extraction and classification purposes.
The proposed approach is validated over two datasets of endoscopic images, with the experiments conducted over the full and patch-split images. The application of Deep Convolutional Generative Adversarial Networks for the data augmentation step and LeNet-5 and AlexNet for the classification step allowed us to validate the proposed methodology over an extensive set of datasets (based on original and augmented sets), reaching 90\% accuracy for the patch-based approach and 85\% for the image-based approach. Both results are based on augmented datasets and are statistically different from the ones obtained in the original datasets of the same kind. Moreover, the impact of data augmentation was evaluated in the context of image description and classification, and the results obtained using synthetic images outperformed the ones over the original datasets, as well as other recent approaches from the literature. Such results suggest promising insights into the importance of proper data for accurate classification in computer-assisted Barrett's esophagus and adenocarcinoma detection.}, subject = {Maschinelles Lernen}, language = {en} } @unpublished{WeihererEigenbergerBrebantetal., author = {Weiherer, Maximilian and Eigenberger, Andreas and Br{\'e}bant, Vanessa and Prantl, Lukas and Palm, Christoph}, title = {Learning the shape of female breasts: an open-access 3D statistical shape model of the female breast built from 110 breast scans}, pages = {15}, abstract = {We present the Regensburg Breast Shape Model (RBSM) - a 3D statistical shape model of the female breast built from 110 breast scans, and the first ever publicly available. Together with the model, a fully automated, pairwise surface registration pipeline used to establish correspondence among 3D breast scans is introduced. Our method is computationally efficient and requires only four landmarks to guide the registration process. In order to weaken the strong coupling between breast and thorax, we propose to minimize the variance outside the breast region as much as possible. To achieve this goal, a novel concept called breast probability masks (BPMs) is introduced. A BPM assigns probabilities to each point of a 3D breast scan, telling how likely it is that a particular point belongs to the breast area. During registration, we use BPMs to align the template to the target as accurately as possible inside the breast region and only roughly outside. This simple yet effective strategy significantly reduces the unwanted variance outside the breast region, leading to better statistical shape models in which breast shapes are quite well decoupled from the thorax. The RBSM is thus able to produce a variety of different breast shapes as independently as possible from the shape of the thorax. Our systematic experimental evaluation reveals a generalization ability of 0.17 mm and a specificity of 2.8 mm for the RBSM. Ultimately, our model is seen as a first step towards combining physically motivated deformable models of the breast and statistical approaches in order to enable more realistic surgical outcome simulation.}, language = {en} } @inproceedings{PalmMetzlerMohametal., author = {Palm, Christoph and Metzler, V. and Moham, B. and Dieker, O. and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Co-Occurrence Matrizen zur Texturklassifikation in Vektorbildern}, series = {Bildverarbeitung f{\"u}r die Medizin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin}, editor = {Evers, H. and Glombitza, G.
and Lehmann, Thomas M. and Meinzer, H.-P.}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-642-60125-5_69}, pages = {367 -- 371}, abstract = {Statistische Eigenschaften nat{\"u}rlicher Grauwerttexturen werden mit Co-Occurrence Matrizen, basierend auf der Grauwertstatistik zweiter Ordnung, modelliert. Die Matrix gibt dann die A-priori-Wahrscheinlichkeiten aller Grauwertpaare an. Da in der medizinischen Bildverarbeitung verst{\"a}rkt Multispektralbilder ausgewertet werden, wird das bekannte Konzept hier auf beliebige Vektorbilder erweitert. Dadurch kann bei der Texturklassifikation die zur Verf{\"u}gung stehende Information vollst{\"a}ndig genutzt werden. Insbesondere zur Detektion von Farbtexturen ist dieser Ansatz geeignet, da Wertepaare unterschiedlicher Spektralebenen ausgewertet werden k{\"o}nnen. Ebenso kann die Methode auch bei der Multiskalendekomposition von Intensit{\"a}tsbildern zur Verbesserung der Texturerkennung beitragen. Die in den Matrizen entstehenden Muster lassen dann {\"u}ber die Extraktion geeigneter Texturdeskriptoren R{\"u}ckschl{\"u}sse auf die Texturen des Bildes zu.}, language = {de} } @inproceedings{PalmPelkmannLehmannetal., author = {Palm, Christoph and Pelkmann, Annegret and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Distortion Correction of Laryngoscopic Images}, series = {Advances in quantitative laryngoscopy, voice and speech research, Proceedings of the 3rd international workshop Aachen, RWTH}, booktitle = {Advances in quantitative laryngoscopy, voice and speech research, Proceedings of the 3rd international workshop Aachen, RWTH}, pages = {117 -- 125}, abstract = {Laryngoscopic images of the vocal tract are used for diagnostic purposes. Quantitative measurements like changes of the glottis size or the surface of the vocal cords during an image sequence can be helpful to describe the healing process or to compare the findings of different patients. Typically, the endoscopic images show a circularly symmetric distortion (barrel distortion). Therefore, measurements of geometric dimensions depend on the object's position in the image. In this paper, an algorithm is presented which allows the computation of the translation-invariant "real" object size by correcting the image distortion without using additional calibration of the optical environment.}, language = {en} } @inproceedings{PalmNeuschaeferRubeLehmannetal., author = {Palm, Christoph and Neuschaefer-Rube, C. and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Wissensbasierte Bewegungskompensation in aktiven Konturmodellen}, series = {Bildverarbeitung f{\"u}r die Medizin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin}, editor = {Evers, H. and Glombitza, G. and Lehmann, Thomas M. and Meinzer, H.-P.}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-642-60125-5_2}, pages = {8 -- 12}, abstract = {Zur Analyse von Lippenbewegungsabl{\"a}ufen wird ein aktives Konturmodell eingesetzt. Probleme bereitet die hohe Sprechgeschwindigkeit, die in starken Objektverschiebungen resultiert und bislang nicht durch eine alleinige Konturanpassung kompensiert werden kann. In diesem Beitrag werden die klassischen aktiven Konturmodelle um eine Vorjustierung der Grobkonturen erweitert, die eine energiebasierte Konturanpassung erst m{\"o}glich macht. Die Sch{\"a}tzung der Verschiebung zur Vorjustierung basiert auf dem Gradientenbild und einem pr{\"a}dikatenlogisch formulierten Regelwerk, das Annahmen und Nebenbedingungen als Wissensbasis enth{\"a}lt.
Mit Hilfe dieser Erweiterungen ist eine automatisierte Konturverfolgung der Lippen m{\"o}glich.}, language = {de} } @inproceedings{PalmSchollLehmannetal., author = {Palm, Christoph and Scholl, Ingrid and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Quantitative Farbmessung in laryngoskopischen Bildern}, series = {Bildverarbeitung f{\"u}r die Medizin}, booktitle = {Bildverarbeitung f{\"u}r die Medizin}, editor = {Lehmann, Thomas M. and Metzler, V. and Spitzer, Klaus and Tolxdorff, Thomas}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-642-58775-7_81}, pages = {412 -- 416}, abstract = {Quantitative Farbmessungen sollen die Diagnostik laryngealer Erkrankungen unterst{\"u}tzen. Dabei wird der Farbeindruck nicht nur durch die Reflexionseigenschaften des Gewebes, sondern auch durch die Farbe der verwendeten Lichtquelle beeinflußt. Der hier vorgestellte Farbkonstanz-Algorithmus basiert auf dem dichromatischen Reflexionsmodell und liefert eine pixelweise Trennung des Farbbildes in seine beiden Farbanteile. Die K{\"o}rperfarbe entspricht dabei der gewebespezifischen Reflexion, die Oberfl{\"a}chenfarbe der Strahlung der Lichtquelle.}, language = {de} } @article{MendelRauberSouzaJretal., author = {Mendel, Robert and Rauber, David and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Error-Correcting Mean-Teacher: Corrections instead of consistency-targets applied to semi-supervised medical image segmentation}, series = {Computers in Biology and Medicine}, volume = {154}, journal = {Computers in Biology and Medicine}, number = {March}, publisher = {Elsevier}, issn = {0010-4825}, doi = {10.1016/j.compbiomed.2023.106585}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-57790}, pages = {13}, abstract = {Semantic segmentation is an essential task in medical imaging research. Many powerful deep-learning-based approaches can be employed for this problem, but they are dependent on the availability of an expansive labeled dataset. In this work, we augment such supervised segmentation models to be suitable for learning from unlabeled data. Our semi-supervised approach, termed Error-Correcting Mean-Teacher, uses an exponential moving average model like the original Mean Teacher but introduces our new paradigm of error correction. The original segmentation network is augmented to handle this secondary correction task. Both tasks build upon the core feature extraction layers of the model. For the correction task, features detected in the input image are fused with features detected in the predicted segmentation and further processed with task-specific decoder layers. The combination of image and segmentation features allows the model to correct present mistakes in the given input pair. The correction task is trained jointly on the labeled data. On unlabeled data, the exponential moving average of the original network corrects the student's prediction. The combined outputs of the student's prediction and the teacher's correction form the basis for the semi-supervised update. We evaluate our method with the 2017 and 2018 Robotic Scene Segmentation data, the ISIC 2017 and the BraTS 2020 Challenges, a proprietary Endoscopic Submucosal Dissection dataset, Cityscapes, and Pascal VOC 2012. Additionally, we analyze the impact of the individual components and examine the behavior when the amount of labeled data varies, with experiments performed on two distinct segmentation architectures.
Our method shows improvements in terms of the mean Intersection over Union over the supervised baseline and competing methods. Code is available at https://github.com/CloneRob/ECMT.}, language = {en} } @article{MaierDesernoHandelsetal., author = {Maier, Andreas and Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Palm, Christoph and Tolxdorff, Thomas}, title = {IJCARS: BVM 2021 special issue}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {16}, journal = {International Journal of Computer Assisted Radiology and Surgery}, publisher = {Springer}, doi = {10.1007/s11548-021-02534-7}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-21666}, pages = {2067 -- 2068}, abstract = {The German workshop on medical image computing (BVM) has been held in different locations in Germany for more than 20 years. In terms of content, BVM has focused on the computer-aided analysis of medical image data with a wide range of applications, e.g. in the area of imaging, diagnostics, operation planning, computer-aided intervention and visualization. During this time, there have been remarkable methodological developments and upheavals, on which the BVM community has worked intensively. The area of machine learning should be emphasized, which has led to significant improvements, especially for tasks of classification and segmentation, but increasingly also in image formation and registration. As a result, work in connection with deep learning now dominates the BVM. These developments have also contributed to the establishment of medical image processing at the interface between computer science and medicine as one of the key technologies for the digitization of the health system. In addition to the presentation of current research results, a central aspect of the BVM is the promotion of young scientists from the diverse BVM community, covering not only Germany but also Austria, Switzerland, the Netherlands and other European neighbors. The conference serves primarily doctoral students and postdocs, but also students with excellent bachelor's and master's theses, as a platform to present their work, to enter into professional discourse with the community, and to establish networks with specialist colleagues. Despite the many conferences and congresses that are also relevant for medical image processing, the BVM has lost none of its importance and attractiveness and has retained its permanent place in the annual conference rhythm. Building on this foundation, there are some innovations and changes this year. The BVM 2021 was organized for the first time at the Ostbayerische Technische Hochschule Regensburg (OTH Regensburg, a technical university of applied sciences). After Aachen, Berlin, Erlangen, Freiburg, Hamburg, Heidelberg, Leipzig, L{\"u}beck, and Munich, Regensburg is not just a new venue. OTH Regensburg is the first representative of the universities of applied sciences (HAW) to organize the conference, which differs from universities, university hospitals, or research centers like Fraunhofer or Helmholtz. This also reflects the further development of the research landscape in Germany, where HAWs increasingly contribute to applied research in addition to their focus on teaching. This development is also evident in the contributions submitted to the BVM in recent years. At BVM 2021, which was held in a virtual format for the first time due to the Corona pandemic, an attractive and high-quality program was offered.
Fortunately, the number of submissions increased significantly. Out of 97 submissions, 26 presentations, 51 posters and 5 software demonstrations were accepted via an anonymized reviewing process with three reviews each. The three best works have been awarded BVM prizes, selected by a separate committee. Based on these high-quality submissions, we are able to present another special issue in the International Journal of Computer Assisted Radiology and Surgery (IJCARS). Out of the 97 submissions, the ones with the highest scores have been invited to submit an extended version of their paper to be presented in IJCARS. As a result, we are now able to present this special issue with seven excellent articles. Many submissions focus on machine learning in a medical context.}, subject = {Bildgebendes Verfahren}, language = {en} } @article{SchulzePalmKerschbaumetal., author = {Schulze, Elke and Palm, Christoph and Kerschbaum, Maximilian and Seidel, Roman and Lehmann, Lars and Koller, Michael and Pfingsten, Andrea}, title = {KI-gest{\"u}tzte Untersuchung in der nicht-operativen Versorgung symptomgebender Erkrankungen des Kniegelenks - ein multiprofessionelles Konzept (KINEESIO)}, series = {MSK - Muskuloskelettale Physiotherapie}, volume = {28}, journal = {MSK - Muskuloskelettale Physiotherapie}, number = {5}, publisher = {Thieme}, issn = {2701-6986}, doi = {10.1055/a-2402-9982}, pages = {312 -- 321}, abstract = {Beschwerdebilder am Kniegelenk aufgrund muskuloskelettaler degenerativer oder verletzungsbedingter Erkrankungen sind h{\"a}ufig, nehmen im Alter zu und sind mit der steigenden Inanspruchnahme {\"a}rztlicher und therapeutischer Behandlungsmaßnahmen verbunden. Einer erfolgreichen Therapie gehen oft notwendige zeit- und ressourcenaufwendige Untersuchungen zur Erkennung und Differenzierung der patient*innenspezifischen Problematik voraus. Im Zusammenhang mit der nicht-operativen Versorgung des Kniegelenks hat ein sektor{\"u}bergreifendes multiprofessionelles Forschungsteam ein Konzept entwickelt, um k{\"u}nstliche neuronale Netze so zu trainieren, dass sie bei der {\"a}rztlichen und physiotherapeutischen Untersuchung unterst{\"u}tzend Einsatz finden k{\"o}nnen. Denn gerade in der Erfassung und Auswertung umfassender Datenmengen liegen große Potenziale in der K{\"u}nstlichen Intelligenz (KI) im Gesundheitswesen. Das Projekt KINEESIO trainiert und testet KI-gest{\"u}tzte Screening- Tools zur Untersuchung von Patient*innen mit Kniegelenkerkrankungen. Diese unterst{\"u}tzen die Abl{\"a}ufe zwischen Leistungserbringern und Patient*innen, tragen zu einer verbesserten Differenzierung individueller Beschwerdebilder bei und dienen Entscheidungsprozessen f{\"u}r eine ad{\"a}quate Versorgung. 
Dadurch sollen Ressourcen im Gesundheitswesen geschont und eine qualitativ hochwertige Therapie ausreichend erm{\"o}glicht werden.}, language = {de} } @article{HartmannWeihererSchiltzetal., author = {Hartmann, Robin and Weiherer, Maximilian and Schiltz, Daniel and Seitz, Stephan and Lotter, Luisa and Anker, Alexandra and Palm, Christoph and Prantl, Lukas and Br{\'e}bant, Vanessa}, title = {A Novel Method of Outcome Assessment in Breast Reconstruction Surgery: Comparison of Autologous and Alloplastic Techniques Using Three-Dimensional Surface Imaging}, series = {Aesthetic Plastic Surgery}, volume = {44}, journal = {Aesthetic Plastic Surgery}, publisher = {Springer}, address = {Heidelberg}, doi = {10.1007/s00266-020-01749-4}, pages = {1980 -- 1987}, abstract = {Background Breast reconstruction is an important coping tool for patients undergoing a mastectomy. There are numerous surgical techniques in breast reconstruction surgery (BRS). Regardless of the technique used, creating a symmetric outcome is crucial for patients and plastic surgeons. Three-dimensional surface imaging enables surgeons and patients to assess the outcome's symmetry in BRS. To discriminate between autologous and alloplastic techniques, we analyzed both techniques using objective optical computerized symmetry analysis. Software was developed that enables clinicians to assess optical breast symmetry using three-dimensional surface imaging. Methods Twenty-seven patients who had undergone autologous (n = 12) or alloplastic (n = 15) BRS received three-dimensional surface imaging. Anthropomorphic data were collected digitally using semiautomatic measurements and automatic measurements. Automatic measurements were taken using the newly developed software. To quantify symmetry, a Symmetry Index is proposed. Results Statistical analysis revealed that there is no difference in the outcome symmetry between the two groups (t test for independent samples; p = 0.48, two-tailed). Conclusion This study's findings provide a foundation for qualitative symmetry assessment in BRS using automated digital anthropometry. In the present trial, no difference in the outcomes' optical symmetry was detected between autologous and alloplastic approaches.}, subject = {Mammoplastik}, language = {en} } @article{HartmannWeihererSchiltzetal., author = {Hartmann, Robin and Weiherer, Maximilian and Schiltz, Daniel and Baringer, Magnus and Noisser, Vivien and H{\"o}sl, Vanessa and Eigenberger, Andreas and Seitz, Stefan and Palm, Christoph and Prantl, Lukas and Br{\'e}bant, Vanessa}, title = {New aspects in digital breast assessment: further refinement of a method for automated digital anthropometry}, series = {Archives of Gynecology and Obstetrics}, volume = {303}, journal = {Archives of Gynecology and Obstetrics}, publisher = {Springer Nature}, address = {Heidelberg}, issn = {1432-0711}, doi = {10.1007/s00404-020-05862-2}, pages = {721 -- 728}, abstract = {Purpose: In this trial, we used a previously developed prototype software to assess aesthetic results after reconstructive surgery for congenital breast asymmetry using automated anthropometry. To demonstrate the agreement between the manual and automatic digital measurements, we evaluated the software by comparing the manual and automatic measurements of 46 breasts. Methods: Twenty-three patients who underwent reconstructive surgery for congenital breast asymmetry at our institution were examined and underwent 3D surface imaging.
Per patient, 14 manual and 14 computer-based anthropometric measurements were obtained according to a standardized protocol. Manual and automatic measurements, as well as the previously proposed Symmetry Index (SI), were compared. Results: The Wilcoxon signed-rank test revealed no significant differences in six of the seven measurements between the automatic and manual assessments. The SI showed robust agreement between the automatic and manual methods. Conclusion: The present trial validates our method for digital anthropometry. Despite the discrepancy in one measurement, all remaining measurements, including the SI, showed high agreement between the manual and automatic methods. The proposed data bring us one step closer to the long-term goal of establishing robust instruments to evaluate the results of breast surgery.}, language = {en} } @inproceedings{MendelSouzaJrRauberetal., author = {Mendel, Robert and Souza Jr., Luis Antonio de and Rauber, David and Papa, Jo{\~a}o Paulo and Palm, Christoph}, title = {Semi-supervised Segmentation Based on Error-Correcting Supervision}, series = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX}, booktitle = {Computer vision - ECCV 2020: 16th European conference, Glasgow, UK, August 23-28, 2020, Proceedings, Part XXIX}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-58525-9}, doi = {10.1007/978-3-030-58526-6_9}, pages = {141 -- 157}, abstract = {Pixel-level classification is an essential part of computer vision. For learning from labeled data, many powerful deep learning models have been developed recently. In this work, we augment such supervised segmentation models by allowing them to learn from unlabeled data. Our semi-supervised approach, termed Error-Correcting Supervision, leverages a collaborative strategy. Apart from the supervised training on the labeled data, the segmentation network is judged by an additional network. The secondary correction network learns on the labeled data to optimally spot correct predictions, as well as to amend incorrect ones. As an auxiliary regularization term, the corrector directly influences the supervised training of the segmentation network. On unlabeled data, the output of the correction network is essential to create a proxy for the unknown truth. The corrector's output is combined with the segmentation network's prediction to form the new target. We propose a loss function that incorporates both the pseudo-labels and the predictive certainty of the correction network. Our approach can easily be added to supervised segmentation models. We show consistent improvements over a supervised baseline on experiments on both the Pascal VOC 2012 and the Cityscapes datasets with varying amounts of labeled data.}, subject = {Semi-Supervised Learning}, language = {en} } @inproceedings{PalmSchollLehmannetal., author = {Palm, Christoph and Scholl, Ingrid and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Trennung von diffuser und spiegelnder Reflexion in Farbbildern des Larynx zur Untersuchung von Farb- und Formmerkmalen der Stimmlippen}, series = {Bildverarbeitung f{\"u}r die Medizin. Algorithmen, Systeme, Anwendungen. Proceedings des Aachener Workshops am 8. und 9. November 1996}, booktitle = {Bildverarbeitung f{\"u}r die Medizin. Algorithmen, Systeme, Anwendungen. Proceedings des Aachener Workshops am 8. und 9. November 1996}, editor = {Lehmann, Thomas M.
and Scholl, Ingrid and Spitzer, Klaus}, publisher = {Verlag der Augustinus-Buchhandlung}, address = {Aachen}, pages = {229 -- 234}, abstract = {Zur diagnostischen Unterst{\"u}tzung bei der Befundung laryngealer Erkrankungen soll eine Farb- und Formanalyse der Stimmlippen durchgef{\"u}hrt werden. In diesem Beitrag wird ein Verfahren zur Trennung der spiegelnden und diffusen Reflexionsanteile in Farbbildern des Larynx vorgestellt. Die Farbe der diffusen Komponente entspricht dabei der beleuchtungsunabh{\"a}ngigen Objektfarbe, w{\"a}hrend deren Wichtungsfaktoren als Eingabe f{\"u}r Shape-from-Shading-Verfahren zur Oberfl{\"a}chenrekonstruktion dienen.}, subject = {Laryngoskopie}, language = {de} } @inproceedings{HassanIlgnerPalmetal., author = {Hassan, H. and Ilgner, Justus F. R. and Palm, Christoph and Lehmann, Thomas M. and Spitzer, Klaus and Westhofen, Martin}, title = {Objective Judgement of Endoscopic Laryngeal Images}, series = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Proceedings of the 3rd International Workshop, RWTH Aachen}, booktitle = {Advances in Quantitative Laryngoscopy, Voice and Speech Research, Proceedings of the 3rd International Workshop, RWTH Aachen}, editor = {Lehmann, Thomas M. and Spitzer, Klaus and Tolxdorff, Thomas}, pages = {135 -- 142}, abstract = {Video documentation of endoscopic findings simplifies diagnostic counseling of the patient and aids pre-operative discussion among the medical team. Judgment of such images is still subjective and cannot give a quantitative evaluation of the disease process regarding diagnosis or response to treatment. Modern treatment of early laryngeal cancer with laser ablation requires intensive follow-up and frequent direct laryngoscopy under general anesthesia with blind biopsies to detect any residual tumor or recurrence. Inflammatory conditions of the larynx are frequently confused with other causes of dysphonia. Mapping and digital analysis of the documented image will suggest the tumor site and avoid undue blind biopsies under anesthesia. However, varying illumination results in different colors reflected from the same object. To achieve quantitative analysis, color constancy has to be assured. In this paper, the environment is presented which allows the objective judgment of laryngoscopies.}, language = {en} } @inproceedings{SchollPalmSovakaretal., author = {Scholl, Ingrid and Palm, Christoph and Sovakar, Abhijit and Lehmann, Thomas M. and Spitzer, Klaus}, title = {Quantitative Analyse der Stimmlippen}, series = {5. Workshop Digitale Bildverarbeitung in der Medizin, Universit{\"a}t Freiburg, 10.-11. M{\"a}rz 1997}, booktitle = {5. Workshop Digitale Bildverarbeitung in der Medizin, Universit{\"a}t Freiburg, 10.-11. M{\"a}rz 1997}, editor = {Arnolds, B. and Mueller, H. and Saupe, D. and Tolxdorff, Thomas}, pages = {81 -- 86}, language = {de} } @misc{KreftingZaunsederSaeringetal., author = {Krefting, Dagmar and Zaunseder, Sebastian and S{\"a}ring, Dennis and Wittenberg, Thomas and Palm, Christoph and Schiecke, Karin and Krenkel, Lars and Hennemuth, Anja and Schnell, Susanne and Spicher, Nicolai}, title = {Blutdruck, H{\"a}modynamik und Gef{\"a}ßzustand: Innovative Erfassung und Bewertung - Schwerpunkt bildbasierte Verfahren}, series = {66. Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e. V. (GMDS), 12. Jahreskongress der Technologie- und Methodenplattform f{\"u}r die vernetzte medizinische Forschung e. V. (TMF), 26. - 30.09.2021, online}, journal = {66.
Jahrestagung der Deutschen Gesellschaft f{\"u}r Medizinische Informatik, Biometrie und Epidemiologie e. V. (GMDS), 12. Jahreskongress der Technologie- und Methodenplattform f{\"u}r die vernetzte medizinische Forschung e. V. (TMF), 26. - 30.09.2021, online}, doi = {10.3205/21gmds016}, url = {http://nbn-resolving.de/urn:nbn:de:0183-21gmds0167}, abstract = {Einleitung: Blutdruck gilt als sogenannter Vitalparameter und damit als einer der grundlegenden Indikatoren f{\"u}r den Gesundheitszustand einer Person. Sowohl zu niedriger als auch zu hoher Blutdruck kann lebensbedrohend sein, letzterer ist dar{\"u}ber hinaus ein Risikofaktor insbesondere f{\"u}r Herz-Kreislauf-Erkrankungen, die trotz wichtiger Fortschritte in der Behandlung immer noch die h{\"a}ufigste Todesursache in Deutschland darstellen. Die H{\"a}modynamik, also die raumzeitliche Dynamik des Blutflusses, und der Gef{\"a}ßzustand sind eng verbunden mit dem Blutdruck und ebenfalls von hoher klinischer Relevanz, u.a. zur Identifikation von Durchblutungsst{\"o}rungen und ung{\"u}nstigen Druckverteilungen der Gef{\"a}ßwand. Innovationen sowohl in der Messtechnik als auch in der Datenanalyse bieten heute neue M{\"o}glichkeiten der Erfassung und Bewertung von Blutdruck, H{\"a}modynamik und Gef{\"a}ßzustand [1], [2], [3], [4]. Methodik: In einer gemeinsamen Workshopserie der AG Medizinische Bild- und Signalverarbeitung der GMDS und des Fachausschusses Biosignale der DGBMT werden wir neue Ans{\"a}tze und L{\"o}sungen f{\"u}r Mess- und Analyseverfahren zu Blutdruck und -fluss sowie zum Gef{\"a}ßzustand vorstellen und diskutieren. Dabei stehen im ersten Workshop auf der GMDS-Jahrestagung bildbasierte Verfahren im Zentrum, w{\"a}hrend der zweite Workshop auf der DGBMT-Jahrestagung den Fokus auf biosignalbasierte Verfahren legt. Es werden aktuelle Forschungsergebnisse vorgestellt und diskutiert. Es sind jeweils mehrere Vortr{\"a}ge geplant mit ausreichend Zeit zur Diskussion. Folgende Vortr{\"a}ge sind geplant (Arbeitstitel): Sebastian Zaunseder: Videobasierte Erfassung des Blutdrucks; Anja Hennemuth: A Visualization Toolkit for the Analysis of Aortic Anatomy and Pressure Distribution; Lars Krenkel: Numerische Analyse der Rupturwahrscheinlichkeit zerebraler Aneurysmata; Susanne Schnell: Messung des Blutflusses und h{\"a}modynamischer Parameter mit 4D flow MRI: M{\"o}glichkeiten und Herausforderungen. Ergebnisse: Ziel des Workshops ist die Identifikation von innovativen Ans{\"a}tzen und neuen Methoden zur qualitativen und quantitativen Bestimmung von h{\"a}modynamischen Parametern sowie deren kritische Bewertung durch die Community f{\"u}r die Eignung in der klinischen Entscheidungsunterst{\"u}tzung. Diskussion: Der Workshop leistet inhaltlich einen Beitrag zu zentralen Aspekten f{\"u}r die Herz-Kreislauf-Medizin. Er bringt dabei Expertise aus verschiedenen Bereichen zusammen und schl{\"a}gt die Br{\"u}cke zwischen Kardiologie, Medizininformatik und Medizintechnik. Schlussfolgerung: Innovative Technologien aus Medizintechnik und Informatik erm{\"o}glichen zunehmend eine einfache und raumzeitlich aufgel{\"o}ste Erfassung und Bewertung wichtiger Informationen zur Unterst{\"u}tzung von Diagnose und Therapieverfolgung. [1] Zaunseder S, Trumpp A, Wedekind D, Malberg H. Cardiovascular assessment by imaging photoplethysmography - a review. Biomed Tech (Berl). 2018 Oct 25;63(5):617-34. [2] Huellebrand M, Messroghli D, Tautz L, Kuehne T, Hennemuth A. An extensible software platform for interdisciplinary cardiovascular imaging research. Comput Methods Programs Biomed.
2020 Feb;184:105277. [3] Schmitter S, Adriany G, Waks M, Moeller S, Aristova M, Vali A, et al. Bilateral Multiband 4D Flow MRI of the Carotid Arteries at 7T. Magn Reson Med. 2020 Oct;84(4):1947-60. [4] Birkenmaier C, Krenkel L. Flow in Artificial Lungs. In: New Results in Numerical and Experimental Fluid Mechanics XIII. Contributions to the 22nd STAB/DGLR Symposium. Springer; 2021.}, subject = {Blutdruck}, language = {de} } @article{GrassmannMengelkampBrandletal., author = {Graßmann, Felix and Mengelkamp, Judith and Brandl, Caroline and Harsch, Sebastian and Zimmermann, Martina E. and Linkohr, Birgit and Peters, Annette and Heid, Iris M. and Palm, Christoph and Weber, Bernhard H. F.}, title = {A Deep Learning Algorithm for Prediction of Age-Related Eye Disease Study Severity Scale for Age-Related Macular Degeneration from Color Fundus Photography}, series = {Ophthalmology}, volume = {125}, journal = {Ophthalmology}, number = {9}, publisher = {Elsevier}, doi = {10.1016/j.ophtha.2018.02.037}, pages = {1410 -- 1420}, abstract = {Purpose Age-related macular degeneration (AMD) is a common threat to vision. While classification of disease stages is critical to understanding disease risk and progression, several systems based on color fundus photographs are known. Most of these require in-depth and time-consuming analysis of fundus images. Herein, we present an automated computer-based classification algorithm. Design Algorithm development for AMD classification based on a large collection of color fundus images. Validation is performed on a cross-sectional, population-based study. Participants. We included 120 656 manually graded color fundus images from 3654 Age-Related Eye Disease Study (AREDS) participants. AREDS participants were >55 years of age, and non-AMD sight-threatening diseases were excluded at recruitment. In addition, performance of our algorithm was evaluated in 5555 fundus images from the population-based Kooperative Gesundheitsforschung in der Region Augsburg (KORA; Cooperative Health Research in the Region of Augsburg) study. Methods. We defined 13 classes (9 AREDS steps, 3 late AMD stages, and 1 for ungradable images) and trained several convolutional deep learning architectures. An ensemble of network architectures improved prediction accuracy. An independent dataset was used to evaluate the performance of our algorithm in a population-based study. Main Outcome Measures. κ Statistics and accuracy to evaluate the concordance between predicted and expert human grader classification. Results. A network ensemble of 6 different neural net architectures predicted the 13 classes in the AREDS test set with a quadratic weighted κ of 92\% (95\% confidence interval, 89\%-92\%) and an overall accuracy of 63.3\%. In the independent KORA dataset, images wrongly classified as AMD were mainly the result of a macular reflex observed in young individuals. By restricting the KORA analysis to individuals >55 years of age and prior exclusion of other retinopathies, the weighted and unweighted κ increased to 50\% and 63\%, respectively. Importantly, the algorithm detected 84.2\% of all fundus images with definite signs of early or late AMD. Overall, 94.3\% of healthy fundus images were classified correctly.
Conclusions Our deep learning algorithm revealed a weighted κ outperforming human graders in the AREDS study and is suitable to classify AMD fundus images in other datasets using individuals >55 years of age.}, subject = {Senile Makuladegeneration}, language = {en} } @article{HartmannWeihererNieberleetal., author = {Hartmann, Robin and Weiherer, Maximilian and Nieberle, Felix and Palm, Christoph and Br{\'e}bant, Vanessa and Prantl, Lukas and Lamby, Philipp and Reichert, Torsten E. and Taxis, J{\"u}rgen and Ettl, Tobias}, title = {Evaluating smartphone-based 3D imaging techniques for clinical application in oral and maxillofacial surgery: A comparative study with the Vectra M5}, series = {Oral and Maxillofacial Surgery}, volume = {29}, journal = {Oral and Maxillofacial Surgery}, publisher = {Springer Nature}, doi = {10.1007/s10006-024-01322-2}, pages = {17}, abstract = {PURPOSE This study aimed to clarify the applicability of smartphone-based three-dimensional (3D) surface imaging for clinical use in oral and maxillofacial surgery, comparing two smartphone-based approaches to the gold standard. METHODS Facial surface models (SMs) were generated for 30 volunteers (15 men, 15 women) using the Vectra M5 (Canfield Scientific, USA), the TrueDepth camera of the iPhone 14 Pro (Apple Inc., USA), and the iPhone 14 Pro with photogrammetry. Smartphone-based SMs were superimposed onto Vectra-based SMs. Linear measurements and volumetric evaluations were performed to evaluate surface-to-surface deviation. To assess inter-observer reliability, all measurements were performed independently by a second observer. Statistical analyses included Bland-Altman analyses, the Wilcoxon signed-rank test for paired samples, and intraclass correlation coefficients. RESULTS Photogrammetry-based SMs exhibited an overall landmark-to-landmark deviation of M = 0.8 mm (SD = ± 0.58 mm, n = 450), while TrueDepth-based SMs displayed a deviation of M = 1.1 mm (SD = ± 0.72 mm, n = 450). The mean volumetric difference for photogrammetry-based SMs was M = 1.8 cc (SD = ± 2.12 cc, n = 90), and M = 3.1 cc (SD = ± 2.64 cc, n = 90) for TrueDepth-based SMs. When comparing the two approaches, most landmark-to-landmark measurements demonstrated 95\% Bland-Altman limits of agreement (LoA) of ≤ 2 mm. Volumetric measurements revealed LoA > 2 cc. Photogrammetry-based measurements demonstrated higher inter-observer reliability for overall landmark-to-landmark deviation. CONCLUSION Both approaches for smartphone-based 3D surface imaging exhibit potential in capturing the face. Photogrammetry-based SMs demonstrated superior alignment and volumetric accuracy with Vectra-based SMs compared with TrueDepth-based SMs.}, language = {en} } @article{MaierPerretSimonetal., author = {Maier, Johannes and Perret, Jerome and Simon, Martina and Schmitt-R{\"u}th, Stephanie and Wittenberg, Thomas and Palm, Christoph}, title = {Force-feedback assisted and virtual fixtures based K-wire drilling simulation}, series = {Computers in Biology and Medicine}, volume = {114}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2019.103473}, pages = {1 -- 10}, abstract = {One common method to fix fractures of the human hand after an accident is an osteosynthesis with Kirschner wires (K-wires) to stabilize the bone fragments. The insertion of K-wires is a delicate minimally invasive surgery, because surgeons operate almost without sight.
Since realistic training methods are time-consuming, costly and insufficient, a virtual-reality (VR) based training system for the placement of K-wires was developed. As part of this, the current work deals with the real-time bone drilling simulation using a haptic force-feedback device. To simulate the drilling, we introduce a virtual fixture-based force-feedback drilling approach. By decomposition of the drilling task into individual phases, each phase can be handled individually to perfectly control the drilling procedure. We report on the related finite state machine (FSM), describe the haptic feedback of each state, and explain how to avoid jerking of the haptic force-feedback during state transitions. The use of the virtual fixture approach results in good haptic performance and a stable drilling behavior. This was confirmed by 26 expert surgeons, who evaluated the virtual drilling on the simulator and rated it as very realistic. To make the system even more convincing, we determined real drilling feed rates through experimental pig bone drilling and transferred them to our system. Due to a constant simulation thread, we can guarantee a precise drilling motion. Virtual fixture-based force-feedback calculation is able to simulate force-feedback-assisted bone drilling with high quality and thus has great potential for the development of medical applications.}, subject = {Handchirurgie}, language = {en} } @inproceedings{SouzaPachecodeAngeloetal., author = {Souza, Luis A. and Pacheco, Andr{\'e} G.C. and de Angelo, Gabriel G. and Oliveira-Santos, Thiago and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {LiwTERM: A Lightweight Transformer-Based Model for Dermatological Multimodal Lesion Detection}, series = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, booktitle = {2024 37th SIBGRAPI Conference on Graphics, Patterns and Images (SIBGRAPI), Manaus, Brazil, 9/30/2024 - 10/3/2024}, publisher = {IEEE}, isbn = {979-8-3503-7603-6}, doi = {10.1109/SIBGRAPI62404.2024.10716324}, pages = {1 -- 6}, abstract = {Skin cancer is the most common type of cancer in the world, accounting for approximately 30\% of all diagnosed tumors. Early diagnosis reduces mortality rates and prevents disfiguring effects in different body regions. In recent years, machine learning techniques, particularly deep learning, have shown promising results in this task, with studies demonstrating that combining a patient's clinical information with images of the lesion is crucial for improving the classification of skin lesions. Despite that, the meaningful use of clinical information with multiple images is mandatory and requires further investigation. Thus, this project aims to contribute to developing multimodal machine learning-based models to cope with the skin lesion classification task employing a lightweight transformer model. As a main hypothesis, models can take multiple images from different sources as input, along with clinical information from the patient's history, leading to a more reliable diagnosis. Our model deals with the non-trivial task of combining images and clinical information (from anamneses) concerning the skin lesions in a lightweight transformer architecture that does not demand high computational resources but still presents competitive classification results.}, language = {en} } @inproceedings{SouzaJrPassosMendeletal., author = {Souza Jr., Luis Antonio de and Passos, Leandro A.
and Mendel, Robert and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Palm, Christoph and Papa, Jo{\~a}o Paulo}, title = {Fine-tuning Generative Adversarial Networks using Metaheuristics}, series = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2021. Proceedings, German Workshop on Medical Image Computing, Regensburg, March 7-9, 2021}, publisher = {Springer Vieweg}, address = {Wiesbaden}, isbn = {978-3-658-33197-9}, doi = {10.1007/978-3-658-33198-6_50}, pages = {205 -- 210}, abstract = {Barrett's esophagus denotes a disorder in the digestive system that affects the esophagus' mucosal cells, causing reflux, and showing potential convergence to esophageal adenocarcinoma if not treated in its initial stages. Thus, fast and reliable computer-aided diagnosis is highly desirable. Nevertheless, such approaches usually suffer from imbalanced datasets, which can be addressed through Generative Adversarial Networks (GANs). Such techniques generate realistic images based on observed samples, albeit at the cost of a proper selection of their hyperparameters. Many works employed a class of nature-inspired algorithms called metaheuristics to tackle the problem considering distinct deep learning approaches. Therefore, this paper's main contribution is to introduce metaheuristic techniques to fine-tune GANs in the context of Barrett's esophagus identification, as well as to investigate the feasibility of generating high-quality synthetic images for early-cancer assisted identification.}, subject = {Endoskopie}, language = {en} } @inproceedings{SzaloZehnerPalm, author = {Szalo, Alexander Eduard and Zehner, Alexander and Palm, Christoph}, title = {GraphMIC: Medizinische Bildverarbeitung in der Lehre}, series = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2015; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 15. bis 17. M{\"a}rz 2015 in L{\"u}beck}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-46224-9_68}, pages = {395 -- 400}, abstract = {Die Lehre der medizinischen Bildverarbeitung vermittelt Kenntnisse mit einem breiten Methodenspektrum. Neben den Grundlagen der Verfahren soll ein Gef{\"u}hl f{\"u}r eine geeignete Ausf{\"u}hrungsreihenfolge und ihre Wirkung auf medizinische Bilddaten entwickelt werden. Die Komplexit{\"a}t der Methoden erfordert vertiefte Programmierkenntnisse, sodass bereits einfache Operationen mit großem Programmieraufwand verbunden sind. Die Software GraphMIC stellt Bildverarbeitungsoperationen in Form interaktiver Knoten zur Verf{\"u}gung und erlaubt das Arrangieren, Parametrisieren und Ausf{\"u}hren komplexer Verarbeitungssequenzen in einem Graphen. Durch den Fokus auf das Design einer Pipeline, weg von sprach- und frameworkspezifischen Implementierungsdetails, lassen sich grundlegende Prinzipien der Bildverarbeitung anschaulich erlernen. In diesem Beitrag stellen wir die visuelle Programmierung mit GraphMIC der nativen Implementierung {\"a}quivalenter Funktionen gegen{\"u}ber.
Die in C++ entwickelte Applikation basiert auf Qt, ITK, OpenCV, VTK und MITK.}, subject = {Bildverarbeitung}, language = {de} } @incollection{Palm, author = {Palm, Christoph}, title = {History, Core Concepts, and Role of AI in Clinical Medicine}, series = {AI in Clinical Medicine: A Practical Guide for Healthcare Professionals}, booktitle = {AI in Clinical Medicine: A Practical Guide for Healthcare Professionals}, editor = {Byrne, Michael F. and Parsa, Nasim and Greenhill, Alexandra T. and Chahal, Daljeet and Ahmad, Omer and Bagci, Ulas}, edition = {1. Aufl.}, publisher = {Wiley}, isbn = {978-1-119-79064-8}, doi = {10.1002/9781119790686.ch5}, pages = {49 -- 55}, abstract = {The field of AI is characterized by robust promises, astonishing successes, and remarkable breakthroughs. AI will play a major role in all domains of clinical medicine, but the role of AI in relation to the physician is not yet completely determined. The term artificial intelligence or AI is broad, and several different terms are used in this context that must be organized and demystified. This chapter will review the key concepts and methods of AI, and will introduce some of the different roles for AI in relation to the physician.}, language = {en} } @article{RueckertRueckertPalm, author = {R{\"u}ckert, Tobias and R{\"u}ckert, Daniel and Palm, Christoph}, title = {Methods and datasets for segmentation of minimally invasive surgical instruments in endoscopic images and videos: A review of the state of the art}, series = {Computers in Biology and Medicine}, volume = {169}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, address = {Amsterdam}, doi = {10.1016/j.compbiomed.2024.107929}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-69830}, pages = {24}, abstract = {In the field of computer- and robot-assisted minimally invasive surgery, enormous progress has been made in recent years based on the recognition of surgical instruments in endoscopic images and videos. In particular, the determination of the position and type of instruments is of great interest. Current work involves both spatial and temporal information, with the idea that predicting the movement of surgical tools over time may improve the quality of the final segmentations. The provision of publicly available datasets has recently encouraged the development of new methods, mainly based on deep learning. In this review, we identify and characterize datasets used for method development and evaluation and quantify their frequency of use in the literature. We further present an overview of the current state of research regarding the segmentation and tracking of minimally invasive surgical instruments in endoscopic images and videos. The paper focuses on methods that work purely visually, without markers of any kind attached to the instruments, considering both single-frame semantic and instance segmentation approaches, as well as those that incorporate temporal information. The publications analyzed were identified through the platforms Google Scholar, Web of Science, and PubMed. The search terms used were "instrument segmentation", "instrument tracking", "surgical tool segmentation", and "surgical tool tracking", resulting in a total of 741 articles published between 01/2015 and 07/2023, of which 123 were included using systematic selection criteria.
A discussion of the reviewed literature is provided, highlighting existing shortcomings and emphasizing the available potential for future developments.}, subject = {Deep Learning}, language = {en} }