@article{LaufSchmiederVolynec, author = {Lauf, Wolfgang and Schmieder, G. and Volynec, I. A.}, title = {The automorphism space Σ(G) of a domain without punctiform prime ends}, series = {The Journal of Geometric Analysis}, volume = {10}, journal = {The Journal of Geometric Analysis}, number = {4}, publisher = {Springer}, issn = {1050-6926}, doi = {10.1007/BF02921993}, pages = {697 -- 712}, abstract = {Let G ⊆ ℂ be a simply connected domain and let Σ (G) be its group of conformal automorphisms with the topology of uniform chordal convergence on G. In 1984 Gaier raised the question whether the connectedness of the space Σ (G) implies that the domain G has only punctiform prime ends. As a contribution to answering this question, in this paper the authors use suitable spike functions to construct a bounded domain without any punctiform prime end such that its automorphism space Σ (G) is not discrete, but totally disconnected.}, language = {en} } @inproceedings{GreinerCurrleEttletal., author = {Greiner, Nina and Currle, Edda and Ettl, Katrin and Frommeld, Debora and Haug, Sonja and Kudienko, Natalie and Lauer, Norina and Lichtenauer, Norbert and Meussling-Sentpali, Annette and Middel, Luise and Mohr, Christa and Pfingsten, Andrea and Popp, Christof and Raptis, Georgios and Weber, Karsten}, title = {Technik im Alltag - Technik zum Leben : Telepr{\"a}senzgest{\"u}tzte Pflege und Therapie bei Schlaganfallpatient*innen}, series = {Kann Digital Pflege?: Clusterkonferenz ,,Zukunft der Pflege", 3., September 2020, N{\"u}rnberg}, volume = {Konferenzband Teil 1}, booktitle = {Kann Digital Pflege?: Clusterkonferenz ,,Zukunft der Pflege", 3., September 2020, N{\"u}rnberg}, address = {K{\"o}nigstetten, {\"O}sterreich}, issn = {2710-3048}, pages = {57 -- 61}, abstract = {Vorgestellt wird das Forschungsvorhaben des interdisziplin{\"a}ren Forschungsprojekts „DeinHaus 4.0 - Telepr{\"a}senzroboter f{\"u}r die Pflege und Unterst{\"u}tzung von Schlaganfallpatientinnen und -patienten (TePUS)". {\"U}ber geeignete Robotersysteme sollen den Proband*innen individuell abgestimmte Angebote aus den Bereichen Telenursing und Teletherapie zur Verf{\"u}gung gestellt werden. Hierbei werden einerseits Telepr{\"a}senzsitzungen durchgef{\"u}hrt, andererseits kommen verschiedene Apps zum Einsatz. Die Interventionen werden mittels eines Mixed-Methods-Ansatzes evaluiert. Begleitend werden Technikakzeptanz und Nutzungsbereitschaft untersucht.}, subject = {Schlaganfall}, language = {de} } @unpublished{FrummetSlanyAmlingetal., author = {Frummet, Alexander and Slany, Emanuel and Amling, Jonas and Lang, Moritz and Scheele, Stephan}, title = {Explainable information retrieval in the audit domain}, doi = {10.48550/arXiv.2507.03479}, pages = {4}, abstract = {Conversational agents such as Microsoft Copilot and Google Gemini assist users with complex search tasks but often generate misleading or fabricated references. This undermines trust, particularly in high-stakes domains such as medicine and finance. Explainable information retrieval (XIR) aims to address this by making search results more transparent and interpretable. While most XIR research is domain-agnostic, this paper focuses on auditing -- a critical yet underexplored area. We argue that XIR systems can support auditors in completing their complex task.
We outline key challenges and future research directions to advance XIR in this domain.}, language = {en} } @article{WeberFrommeldGerhardsetal., author = {Weber, Karsten and Frommeld, Debora and Gerhards, Helene and Krug, Henriette and Kokott, Linda Ellen and Bittner, Uta}, title = {Speaking about enhancement - methodological issues and historical examples}, series = {American Journal of Bioethics Neuroscience}, volume = {11}, journal = {American Journal of Bioethics Neuroscience}, number = {4}, doi = {10.1080/21507740.2020.1831650}, pages = {254 -- 256}, language = {en} } @inproceedings{MuehlhausenGomezLaueretal., author = {M{\"u}hlhausen, Sara and Gomez, Sarah and Lauer, Norina and Baumann, Timo}, title = {Cross-lingual transfer learning does not improve aphasic speech recognition}, series = {Elektronische Sprachsignalverarbeitung 2025: Tagungsband der 36. Konferenz Halle/Saale, 05.-07. M{\"a}rz 2025}, booktitle = {Elektronische Sprachsignalverarbeitung 2025: Tagungsband der 36. Konferenz Halle/Saale, 05.-07. M{\"a}rz 2025}, editor = {Grawunder, Sven}, publisher = {TUDpress}, address = {Dresden}, isbn = {978-3-95908-803-9}, issn = {0940-6832}, doi = {10.35096/othr/pub-8051}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-80518}, pages = {8}, abstract = {In addressing the particular linguistic challenges posed by patients suffering from aphasia, a language disorder, this paper proposes a fine-tuning approach to enhance the speech recognition capabilities of existing models. The available aphasic research data in German is highly limited. To address this constraint, we propose a cross-lingual transfer approach to utilize English data to improve performance in German. This advancement aims to support the development of a therapy platform tailored for patients with aphasia. For the base speech recognition model, we choose to use OpenAI's Whisper model, and for fine-tuning, we make use of TalkBank's AphasiaBank. The experimental findings demonstrate that the transcription of aphasic audio with Whisper is less successful than that of non-aphasic audio. However, fine-tuning the transcription in the respective language resulted in an enhancement of its quality. In contrast, fine-tuning the transcription in another language and expecting a transfer of the learned aphasic speech properties led to a deterioration in its quality.}, subject = {Automatische Spracherkennung}, language = {en} } @article{EbnerFrikelLorenzetal., author = {Ebner, Andrea and Frikel, J{\"u}rgen and Lorenz, Dirk and Schwab, J. and Haltmeier, Markus}, title = {Regularization of inverse problems by filtered diagonal frame decomposition}, series = {Applied and Computational Harmonic Analysis}, volume = {62}, journal = {Applied and Computational Harmonic Analysis}, number = {January}, publisher = {Elsevier}, doi = {10.1016/j.acha.2022.08.005}, pages = {66 -- 83}, abstract = {Inverse problems are at the heart of many practical problems such as image reconstruction or nondestructive testing. A characteristic feature is their instability with respect to data perturbations. To stabilize the inversion process, regularization methods must be developed and applied. In this paper, we introduce the concept of filtered diagonal frame decomposition, which extends the classical filtered SVD to the case of frames. The use of frames as generalized singular systems allows a better match to a given class of potential solutions and is also beneficial for problems where the SVD is not analytically available.
We show that filtered diagonal frame decompositions yield convergent regularization methods, derive convergence rates under source conditions and prove order optimality. Our analysis applies to bounded and unbounded forward operators. As a practical application of our tools, we study filtered diagonal frame decompositions for inverting the Radon transform as an unbounded operator on L²(ℝ²).}, language = {en} } @misc{OPUS4-276, title = {Angewandte Forschung in der Wirtschaftsinformatik: Prozesse, Technologien, Anwendungen, Systeme und Management}, editor = {Barton, Thomas and Herrmann, Frank and Meister, Vera G. and M{\"u}ller, Christian and Seel, Christian}, publisher = {mana-Buch}, address = {Heide}, organization = {Arbeitskreis Wirtschaftsinformatik an Fachhochschulen (AKWI)}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-2767}, subject = {Wirtschaftsinformatik}, language = {de} } @misc{OPUS4-278, title = {Angewandte Forschung in der Wirtschaftsinformatik: Prozesse, Technologien, Anwendungen, Systeme und Management}, editor = {Barton, Thomas and Herrmann, Frank and Meister, Vera G. and M{\"u}ller, Christian and Seel, Christian}, publisher = {mana-Buch}, address = {Heide}, organization = {Arbeitskreis Wirtschaftsinformatik an Fachhochschulen (AKWI)}, doi = {10.15771/978-3-944330-56-3}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-2785}, language = {de} } @misc{OPUS4-280, title = {Angewandte Forschung in der Wirtschaftsinformatik}, editor = {Barton, Thomas and Herrmann, Frank and Meister, Vera G. and M{\"u}ller, Christian and Seel, Christian and Steffens, Ulrike}, publisher = {mana-Buch}, address = {Heide}, organization = {Arbeitskreis Wirtschaftsinformatik an Hochschulen f{\"u}r angewandte Wissenschaften}, doi = {10.15771/978-3-944330-59-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-2809}, subject = {Wirtschaftsinformatik}, language = {de} } @misc{OPUS4-286, title = {Angewandte Forschung in der Wirtschaftsinformatik 2019}, editor = {Wolf, Martin R. and Barton, Thomas and Herrmann, Frank and Meister, Vera G. and M{\"u}ller, Christian and Seel, Christian}, publisher = {mana-Buch}, address = {Heide}, organization = {Arbeitskreis Wirtschaftsinformatik an Hochschulen f{\"u}r Angewandte Wissenschaften im deutschsprachigen Raum (AKWI)}, doi = {10.15771/978-3-944330-62-4}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-2864}, subject = {Wirtschaftsinformatik}, language = {de} } @inproceedings{GoeppelFrikelHaltmeier, author = {G{\"o}ppel, Simon and Frikel, J{\"u}rgen and Haltmeier, Markus}, title = {Data-proximal Neural Networks for Limited-view CT}, series = {Bildverarbeitung f{\"u}r die Medizin 2025 : Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025 : Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas M.}, publisher = {Springer Fachmedien Wiesbaden}, address = {Wiesbaden}, isbn = {978-3-658-47421-8}, doi = {10.1007/978-3-658-47422-5_41}, pages = {185 -- 190}, abstract = {Limited-angle computed tomography (CT) requires solving an inverse problem that is both ill-conditioned and underdetermined. In recent years, learned reconstruction methods have proven highly effective in addressing this challenge.
Most of these methods follow a two-step process: first, an initial reconstruction method is applied to the data to generate an auxiliary reconstruction; second, a neural network is used to map the auxiliary reconstruction closer to the ground truth images. However, when applied to unseen data, there are no guarantees that the network's output will remain consistent with the available measurement data. To address this, we recently introduced a data-proximal network architecture. In this paper, we implement this approach for limited-angle CT and compare its performance with a standard residual network and a null space network.}, language = {en} } @inproceedings{SeligBauerFrikeletal., author = {Selig, Tim and Bauer, Patrick and Frikel, J{\"u}rgen and M{\"a}rz, Thomas and Storath, Martin and Weinmann, Andreas}, title = {Two-stage Approach for Low-dose and Sparse-angle CT Reconstruction using Backprojection}, series = {Bildverarbeitung f{\"u}r die Medizin 2025 (BVM 2025): Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2025 (BVM 2025): Proceedings, German Conference on Medical Image Computing, Regensburg March 09-11, 2025}, editor = {Palm, Christoph and Breininger, Katharina and Deserno, Thomas M. and Handels, Heinz and Maier, Andreas and Maier-Hein, Klaus H. and Tolxdorff, Thomas M.}, publisher = {Springer VS}, address = {Wiesbaden}, isbn = {978-3-658-47421-8}, doi = {10.1007/978-3-658-47422-5_67}, pages = {286 -- 291}, abstract = {This paper presents a novel two-stage approach for computed tomography (CT) reconstruction, focusing on sparse-angle and low-dose setups to minimize radiation exposure while maintaining high image quality. Two-stage approaches consist of an initial reconstruction followed by a neural network for image refinement. In the initial reconstruction, we apply the backprojection (BP) instead of the traditional filtered backprojection (FBP). This enhances computational speed and offers potential advantages for more complex geometries, such as fan-beam and cone-beam CT. Additionally, BP addresses noise and artifacts in sparse-angle CT by leveraging its inherent noise-smoothing effect, which reduces streaking artifacts common in FBP reconstructions. For the second stage, we fine-tune the DRUNet proposed by Zhang et al. to further improve reconstruction quality. We call our method BP-DRUNet and evaluate its performance on a synthetically generated ellipsoid dataset alongside the well-established LoDoPaB-CT dataset. Our results show that BP-DRUNet produces competitive results in terms of PSNR and SSIM metrics compared to the FBP-based counterpart, FBP-DRUNet, and delivers visually competitive results across all tested angular setups.}, language = {en} } @article{HillebrandWestnerMatschi, author = {Hillebrand, Patrick and Westner, Markus and Matschi, Markus}, title = {Schl{\"u}sselfaktoren erfolgreicher CIOs}, series = {HMD Praxis der Wirtschaftsinformatik}, volume = {59}, journal = {HMD Praxis der Wirtschaftsinformatik}, publisher = {Springer Nature}, doi = {10.1365/s40702-022-00867-8}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-33751}, pages = {762 -- 779}, abstract = {Die vorliegende Studie untersucht Schl{\"u}sselfaktoren erfolgreicher CIOs in deutschen Großunternehmen.
Mit einer mittleren Verweildauer (Median) von 4,0 Jahren weisen deutsche CIOs, die mit 43 \% noch {\"u}berwiegend an den CFO berichten, im Vergleich zu anderen C-Level-Positionen eine deutlich k{\"u}rzere Verweildauer im Amt auf. Die Ergebnisse aus 60 Interviews mit erfolgreichen deutschsprachigen CIOs, die prim{\"a}r {\"u}ber eine {\"u}berdurchschnittlich lange Verweildauer verf{\"u}gen, lassen verschiedene Schl{\"u}sselfaktoren f{\"u}r den Erfolg erkennen: Grundvoraussetzung ist stets die Gew{\"a}hrleistung eines sicheren und effizienten IT-Betriebs. {\"U}ber effektive und innovative Change-Projekte machen die interviewten CIOs den IT-Mehrwert transparent und agieren als Br{\"u}ckenbauer zwischen IT und Fachbereichen. Dadurch wirken sie positiv auf die Firmenkultur ein und etablieren die IT nachhaltig in den Fachbereichen als Erfolgsfaktor. Erfolgreiche CIOs selbst sind keine „Techies", sondern zeichnen sich durch hohe F{\"u}hrungskompetenz und ein hohes Gesch{\"a}ftsverst{\"a}ndnis, gepaart mit vision{\"a}rem Denken, aus. Dadurch gelingt es ihnen, die IT zukunftsorientiert auszurichten und Anforderungen und Potenziale f{\"u}r und aus den Fachbereichen fr{\"u}hzeitig zu antizipieren. Die zuk{\"u}nftige Entwicklung der CIO-Organisation und der Paradigmen in der IT wird durch die Studienteilnehmer hingegen teilweise kontrovers diskutiert - so gibt es beispielsweise bei der Beurteilung der Sinnhaftigkeit und Relevanz der CDO-Position noch kein einheitliches Meinungsbild.}, language = {de} } @article{HillebrandWestnerMatschi, author = {Hillebrand, Patrick and Westner, Markus and Matschi, Markus}, title = {Key factors of successful CIOs}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-66882}, pages = {22}, abstract = {This study examines key factors of successful CIOs in large German companies. With a median tenure of 4.0 years, German CIOs, most of whom (43\%) still report to the CFO, have a significantly shorter tenure compared to other C‑level positions. The results of 60 interviews with successful German-speaking CIOs, who primarily have an above-average tenure, reveal various key factors for successful CIOs: The foundation for success is to ensure secure and efficient IT operations. Through effective and innovative change projects, the CIOs interviewed make the added value of IT transparent and act as bridge-builders between IT and the business. In this way, they have a positive impact on the corporate culture and establish IT as a success factor in the business in the long term. Successful CIOs themselves are not "techies" but are characterized by a high level of leadership competence and a high level of business expertise, coupled with visionary thinking. This enables them to align IT in a future-oriented manner and to anticipate demands and potential for and from the business units at an early stage. The future development of the CIO organization and the paradigms in IT, on the other hand, was discussed controversially by some of the study participants—for example, there is still no uniform opinion on the relevance of the CDO position.}, language = {en} } @misc{HornPippigRaptisetal., author = {Horn, L. C. and Pippig, S. and Raptis, Georgios and Fischer, Uta and Kohler, U.
and Hentschel, Bettina and Martin, Rosemarie}, title = {00083 PAI-1 AND UPA AS PARAMETERS OF TUMOR-ASSOCIATED PROTEOLYSIS IN CERVICAL CARCINOMA}, series = {International Journal of Gynecologic Cancer}, volume = {15}, journal = {International Journal of Gynecologic Cancer}, number = {Suppl 2}, publisher = {BMJ}, issn = {1048-891X}, doi = {10.1136/ijgc-00009577-200509001-00083}, language = {en} } @article{SchuheggerKaraGalkaetal., author = {Schuhegger, Lukas and Kara, Sinan and Galka, Stefan and D{\"u}nnweber, Jan and Meißner, Sebastian}, title = {Umzugsplanung mit Fabrikdatenmodellen und Virtual Reality}, series = {Zeitschrift f{\"u}r wirtschaftlichen Fabrikbetrieb}, volume = {119}, journal = {Zeitschrift f{\"u}r wirtschaftlichen Fabrikbetrieb}, number = {11}, publisher = {de Gruyter}, issn = {2511-0896}, doi = {10.1515/zwf-2024-1154}, pages = {799 -- 804}, abstract = {Angesichts eines dynamischen industriellen Umfelds m{\"u}ssen produzierende Unternehmen flexibel bleiben und sich kontinuierlich anpassen, um ihre Wettbewerbsf{\"a}higkeit zu erhalten. Eine effektive Planung von Restrukturierungen in Produktionssystemen, oft als Umzugsplanung bezeichnet, ist entscheidend, um Verz{\"o}gerungen zu vermeiden und Stillstandszeiten zu minimieren. Die Identifikation von Umzugsf{\"a}llen, welche den Umfang umzuziehender Ressourcen wie Maschinen, Anlagen, Ausr{\"u}stungen und Arbeitspl{\"a}tze beschreiben, und die Definition zugeh{\"o}riger Aufgaben sind essenzielle Schritte der Umzugsplanung. Dieser Beitrag stellt eine Vorgehensweise zur Bestimmung von Umzugsf{\"a}llen mittels eines Fabrikdatenmodells vor, erg{\"a}nzt durch eine Visualisierung in Virtual Reality, um Planende bei der Umzugsplanung zu unterst{\"u}tzen.}, language = {de} } @inproceedings{FranzZuritaDiefenthaleretal., author = {Franz, Maja and Zurita, P{\´i}a and Diefenthaler, Markus and Mauerer, Wolfgang}, title = {Co-Design of Quantum Hardware and Algorithms in Nuclear and High Energy Physics}, series = {EPJ Web of Conferences}, volume = {295}, booktitle = {EPJ Web of Conferences}, publisher = {EDP Sciences}, doi = {10.1051/epjconf/202429512002}, abstract = {Quantum computing (QC) has emerged as a promising technology, and is believed to have the potential to advance nuclear and high energy physics (NHEP) by harnessing quantum mechanical phenomena to accelerate computations. In this paper, we give a brief overview of the current state of quantum computing by highlighting challenges it poses and opportunities it offers to the NHEP community. Noisy intermediate-scale quantum (NISQ) computers, while limited by imperfections and small scale, may hold promise for near-term quantum advantages when coupled with co-designed quantum algorithms and special-purpose quantum processing units (QPUs). We explore various applications in NHEP, including quantum simulation, event classification, and real-time experiment control, emphasising the potential of variational quantum circuits and related techniques.
To identify current interests of the community, we perform an analysis of recent literature in NHEP related to QC.}, language = {en} } @misc{HaugLauerMeusslingSentpalietal., author = {Haug, Sonja and Lauer, Norina and Meussling-Sentpali, Annette and Mohr, Christa and Pfingsten, Andrea and Raptis, Georgios and Weber, Karsten and Bahr, Gudrun and Currle, Edda and Ettl, Katrin and Frommeld, Debora and Greiner, Nina and Kudienko, Natalie and Lichtenauer, Norbert and Middel, Luise and M{\"u}cke, Vanessa and Popp, Christof}, title = {Telepr{\"a}senzroboter f{\"u}r die Pflege und Unterst{\"u}tzung von Schlaganfallpatientinnen und -patienten (TePUS)}, series = {Virtuelle Tagung "DeinHaus 4.0 Oberpfalz", 2020, Ostbayerische Technische Hochschule Regensburg}, journal = {Virtuelle Tagung "DeinHaus 4.0 Oberpfalz", 2020, Ostbayerische Technische Hochschule Regensburg}, language = {de} } @article{PolzGlawionGebissoetal., author = {Polz, Julius and Glawion, Luca and Gebisso, Hiob and Altenstrasser, Lukas and Graf, Maximilian and Kunstmann, Harald and Vogl, Stefanie and Chwala, Christian}, title = {Temporal Super-Resolution, Ground Adjustment, and Advection Correction of Radar Rainfall Using 3-D-Convolutional Neural Networks}, series = {IEEE Transactions on Geoscience and Remote Sensing}, volume = {62}, journal = {IEEE Transactions on Geoscience and Remote Sensing}, doi = {10.1109/TGRS.2024.3371577}, pages = {1 -- 10}, abstract = {Weather radars are highly sophisticated tools for quantitative precipitation estimation (QPE) and provide observations with unmatched spatial representativeness. However, their indirect measurement of precipitation high above ground leads to strong systematic errors compared to direct rain gauge measurements. Additionally, the temporal undersampling from 5-min instantaneous radar measurements requires advection correction. We present ResRadNet, a 3-D-convolutional residual neural network approach, to reduce these errors and, at the same time, increase the temporal resolution of the radar rainfall fields by a 5-min short-range prediction of 1-min time-steps. The network is trained to process spatiotemporal sequences of radar rainfall estimates from a composite product derived from 17 C-band weather radars in Germany. In contrast to previous approaches, we present a method that emphasizes the generation of spatiotemporally consistent and advection-corrected country-wide rainfall maps. Our approach significantly increased the Pearson correlation coefficient (PCC) of the radar product (from 0.63 to 0.74) and decreased the root mean squared error (RMSE) by 22\% when compared to 247 rain gauges at a 5-min resolution. An additional large-scale comparison to eight years of data from 1138 independent manual daily gauges confirmed that the improvement is robust and transferable to new locations. Overall, our study shows the benefits of using 3-D convolutional neural networks (CNNs) for weather radar rainfall estimation to provide 1-min, ground-adjusted, that is, bias-corrected with respect to on-ground sensors, and advection-corrected radar rainfall estimates.}, language = {en} } @inproceedings{HecknerFuchsRaabetal., author = {Heckner, Markus and Fuchs, Markus and Raab, F.
and Wolff, Christian}, title = {Monitoring students' mobile app coding behavior: Data analysis based on IDE and browser interaction logs}, series = {IEEE Global Engineering Education Conference, EDUCON, 03-05 April 2014, Istanbul, Turkey}, booktitle = {IEEE Global Engineering Education Conference, EDUCON, 03-05 April 2014, Istanbul, Turkey}, publisher = {IEEE}, doi = {10.1109/EDUCON.2014.6826202}, pages = {892 -- 899}, abstract = {This paper describes a case study of assessing students' coding behavior and skills in a realistic development setting. Students had to solve typical programming problems in the context of app development for the Android platform using the Eclipse IDE. Data was analyzed using IDE as well as browser interaction logs. In addition, screen recordings of the students' interaction with the IDE provide further insight. In this paper we present the first results of our ongoing work.}, language = {en} } @techreport{KunhardtZerthEberletal., author = {Kunhardt, Horst and Zerth, J{\"u}rgen and Eberl, Inge and Flemming, Daniel and Hilbert, Josef and Weber, Karsten and Kohls, Niko and Rester, David and Engel, Lars and Engel, Christian}, title = {Hochschulinitiative f{\"u}r eine personenzentrierte und nachhaltige Gestaltung von Technik und Digitalisierung in der Pflege - Wege von der Anwendungsforschung zur Implementierung und Evaluierung in der Praxis und Lebenswelt}, doi = {10.13140/RG.2.2.32095.64160}, abstract = {The professors from universities and universities of applied sciences and their educational partners who support this initiative are all joined by a research agenda and a sense of responsibility, according to which it is essential for applied research to further explore the effects of technological assistance systems and digital solutions in and for realistic settings with theoretically and methodologically sound and scientific support, based on the highest possible level of evidence, and to make a contribution to evidence-based translation and transferability to society.}, language = {mul} } @article{ReichenederWestnerMatschi, author = {Reicheneder, Matthias and Westner, Markus and Matschi, Markus}, title = {IT-Near- und Offshoring in deutschen Großunternehmen: Aktueller Stand, Motive und Auswahlkriterien}, series = {HMD Praxis der Wirtschaftsinformatik}, journal = {HMD Praxis der Wirtschaftsinformatik}, publisher = {Springer Nature}, address = {Wiesbaden}, issn = {1436-3011}, doi = {10.1365/s40702-025-01150-2}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-79582}, pages = {19}, abstract = {Die vorliegende Studie untersucht die aktuelle IT-Near- und Offshoring-Praxis in deutschen Großunternehmen. Daf{\"u}r wurden 33 CIOs und IT-F{\"u}hrungskr{\"a}fte mittels Interviews befragt. Haupttreiber der geografischen Verlagerung von IT-Services ins Ausland ist die Ressourcenverf{\"u}gbarkeit in anderen L{\"a}ndern, bedingt durch den Fachkr{\"a}ftemangel in Deutschland. Dies trifft auch auf die Auswahl geeigneter IT-Services f{\"u}r das Near- und Offshoring zu. W{\"a}hrend IT-Near- und Offshoring historisch prim{\"a}r aufgrund von Kosteneinsparungen initiiert wurde, spielt dieser Aspekt heute eine untergeordnete Rolle. Die Interviewteilnehmer und -teilnehmerinnen sehen in Kosteneinsparungen zwar immer noch einen positiven Effekt, erwarten aber, dass dieser in Zukunft aufgrund der Angleichung des Kostenniveaus schwinden wird.
Die Unternehmen verlagern haupts{\"a}chlich Applikationsbetrieb und -entwicklung, erkennen jedoch zunehmend das Potenzial, auch h{\"o}herwertige Aufgaben, z. B. Projektmanagement und Unternehmensarchitekturmanagement, aus geografisch entfernteren L{\"a}ndern erbringen zu lassen. Unternehmen, die ausschließlich IT-Nearshoring betreiben, erbringen weniger als ein Drittel ihrer IT-Services aus nahegelegenen L{\"a}ndern. Firmen mit IT-Offshoring-Pr{\"a}senzen weisen einen h{\"o}heren IT-Near‑/Offshoringgrad auf und verlagern knapp die H{\"a}lfte der eigenen IT-Serviceerbringung ins Ausland. Die beliebtesten L{\"a}nder sind Portugal f{\"u}r das Nearshoring und Indien f{\"u}r das Offshoring.}, language = {de} } @article{MauererJoblinTamburrietal., author = {Mauerer, Wolfgang and Joblin, Mitchell and Tamburri, Damian and Paradis, Carlos and Kazman, Rick and Apel, Sven}, title = {In Search of Socio-Technical Congruence: A Large-Scale Longitudinal Study}, series = {IEEE Transactions on Software Engineering (TSE)}, volume = {48}, journal = {IEEE Transactions on Software Engineering (TSE)}, number = {8}, publisher = {IEEE}, doi = {10.1109/TSE.2021.3082074}, pages = {3159 -- 3184}, abstract = {This paper describes a large-scale empirical study investigating the relevance of socio-technical congruence over key basic software quality metrics, namely, bugs and churn. That is, we explore whether alignment or misalignment of social communication structures and technical dependencies in large software projects influences software quality. To this end, we have defined a quantitative and operational notion of socio-technical congruence, which we call /socio-technical motif congruence/ (STMC). STMC is a measure of the degree to which developers working on the same file or on two related files need to communicate. As socio-technical congruence is a complex and multi-faceted phenomenon, the interpretability of the results is one of our main concerns, so we have employed a careful mixed-methods statistical analysis. In particular, we provide analyses with similar techniques as employed by seminal work in the field to ensure comparability of our results with the existing body of work. The major result of our study, based on an analysis of 25 large open-source projects, is that STMC is /not/ related to project quality measures---software bugs and churn---in any temporal scenario. That is, we find no statistical relationship between the alignment of developer tasks and developer communications on one hand, and project outcomes on the other hand. We conclude that, if congruence does matter, as the literature shows, then its measurable effect lies elsewhere.}, language = {en} } @misc{HaugLauerMeusslingSentpalietal., author = {Haug, Sonja and Lauer, Norina and Meussling-Sentpali, Annette and Mohr, Christa and Pfingsten, Andrea and Raptis, Georgios and Weber, Karsten and Bahr, Gudrun and Currle, Edda and Ettl, Katrin and Frommeld, Debora and Greiner, Nina and Kudienko, Natalie and Lichtenauer, Norbert and Middel, Luise and M{\"u}cke, Vanessa and Popp, Christof}, title = {TePUS - Telepr{\"a}senzroboter f{\"u}r die Pflege}, series = {TRIOKON 2020 : Wissenschaft, Wirtschaft, Gesellschaft in Ostbayern, 29.09.2019, Landshut}, journal = {TRIOKON 2020 : Wissenschaft, Wirtschaft, Gesellschaft in Ostbayern, 29.09.2019, Landshut}, language = {de} } @inproceedings{GabrielWittmannHackeretal., author = {Gabriel, Christian and Wittmann, Christoffer and Hacker, B. and Mauerer, Wolfgang and Huntington, E. and Sabuncu, M.
and Marquardt, Christoph and Leuchs, Gerd}, title = {A high-speed secure quantum random number generator based on vacuum states}, series = {IEEE/OSA Conference on Lasers and Electro-Optics (CLEO), 06-11 May 2012, San Jose CA USA}, booktitle = {IEEE/OSA Conference on Lasers and Electro-Optics (CLEO), 06-11 May 2012, San Jose CA USA}, publisher = {Optical Society of America}, abstract = {A high-speed continuous-variable quantum random bit generator with an expected effective bit generation rate of up to 10 GBit/s is presented. The obtained bit sequences are truly random and unique, i.e. they cannot be known by an adversary.}, language = {en} } @phdthesis{Ramsauer, author = {Ramsauer, Ralf}, title = {OSS Architecture for Mixed-Criticality Systems}, publisher = {Leibniz Universit{\"a}t Hannover}, pages = {188}, abstract = {Computer-based automation in industrial appliances led to a growing number of logically dependent, but physically separated embedded control units per appliance. Many of those components are safety-critical systems, and require adherence to safety standards, which is inconsonant with the relentless demand for features in those appliances. Features lead to a growing number of control units per appliance, and to an increasing complexity of the overall software stack, being unfavourable for safety certifications. Modern CPUs provide means to revise traditional separation of concerns design primitives: the consolidation of systems, which yields new engineering challenges that concern the entire software and system stack. Multi-core CPUs favour economic consolidation of formerly separated systems onto a single efficient hardware unit. Nonetheless, the system architecture must provide means to guarantee the freedom from interference between domains of different criticality. System consolidation demands architectural and engineering strategies to fulfil requirements (e.g., real-time or certifiability criteria) in safety-critical environments. In parallel, there is an ongoing trend to substitute ordinary proprietary base platform software components by mature OSS variants for economic and engineering reasons. There are fundamental differences of processual properties in development processes of OSS and proprietary software. OSS in safety-critical systems requires development process assessment techniques to build an evidence-based fundament for certification efforts that is based upon empirical software engineering methods. In this thesis, I will approach from both sides: the software and system engineering perspective. In the first part of this thesis, I focus on the assessment of OSS components: I develop software engineering techniques that allow to quantify characteristics of distributed OSS development processes. I show that ex-post analyses of software development processes can be used to serve as a foundation for certification efforts, as it is required for safety-critical systems. In the second part of this thesis, I present a system architecture based on OSS components that allows for consolidation of mixed-criticality systems on a single platform. Therefore, I exploit virtualisation extensions of modern CPUs to strictly isolate domains of different criticality.
The proposed architecture shall eradicate any remaining hypervisor activity in order to preserve real-time capabilities of the hardware by design, while guaranteeing strict isolation across domains.}, language = {en} } @inproceedings{SchmidbauerWinterspergerLobeetal., author = {Schmidbauer, Lukas and Wintersperger, Karen and Lobe, Elisabeth and Mauerer, Wolfgang}, title = {Polynomial Reduction Methods and their Impact on QAOA Circuits}, series = {IEEE International Conference on Quantum Software (QSW), 7-13 July 2024, Shenzhen, China}, booktitle = {IEEE International Conference on Quantum Software (QSW), 7-13 July 2024, Shenzhen, China}, doi = {10.1109/QSW62656.2024.00018}, pages = {35 -- 45}, abstract = {Abstraction layers are of paramount importance in software architecture, as they shield the higher-level formulation of payload computations from lower-level details. Since quantum computing (QC) introduces many such details that are often unfamiliar to computer scientists, an obvious desideratum is to devise appropriate abstraction layers for QC. For discrete optimisation, one such abstraction is to cast problems in quadratic unconstrained binary optimisation (QUBO) form, which is amenable to a variety of quantum approaches. However, different mathematically equivalent forms can lead to different behaviour on quantum hardware, ranging from ease of mapping onto qubits to performance scalability. In this work, we show how using higher-order problem formulations (that provide better expressivity in modelling optimisation tasks than plain QUBO formulations) and their automatic transformation into QUBO form can be used to leverage such differences to prioritise between different desired non-functional properties for quantum optimisation. Based on a practically relevant use-case and a graph-theoretic analysis, we evaluate how different transformation approaches influence widely used quantum performance metrics (circuit depth, gate count, gate distribution, qubit scaling), and also consider the classical computational efforts required to perform the transformations, as they influence possibilities for achieving future quantum advantage. Furthermore, we establish more general properties and invariants of the transformation methods. Our quantitative study shows that the approach allows us to satisfy different trade-offs, and suggests various possibilities for the future construction of general-purpose abstractions and automatic generation of useful quantum circuits from high-level problem descriptions.}, language = {en} } @inproceedings{VoelklMelzerDuennweberetal., author = {V{\"o}lkl, Jakob and Melzer, Matthias and D{\"u}nnweber, Jan and Sarkar, Amitrajit}, title = {Dynamic Route Planning for a Data Collecting Luggage Transport Service}, series = {18th IEEE International Conference on Control \& Automation, June 18-21, 2024, Reykjav{\´i}k, Iceland}, booktitle = {18th IEEE International Conference on Control \& Automation, June 18-21, 2024, Reykjav{\´i}k, Iceland}, publisher = {IEEE}, abstract = {Control and Automation of services of the urban infrastructure offered to citizens and tourists are elementary parts of a smart city. But both rely on a stable supply of data from sensors spread across the whole city, e. g., the fill level sensors of waste bins needed for a waste management tool which we developed in a collaboration with the Regensburg city council for the on-demand collection of waste bins.
Europe has a lot of historic cities like Regensburg with narrow streets and huge building walls, some made from granite and fieldstones, which often represent an insurmountable obstacle to wireless data transmission. The reduction of the road traffic volume poses an additional challenge for city planners. By means of networked planning and simulation software, the situation, state and efficiency of citywide logistic services can be monitored and optimized. In the course of such optimizations, we propose the combination of digital and logistic services. As an example, we show that monitoring state information, such as the waste bin fill levels, can be accomplished using the same vehicles and the same planning software that is used for luggage transportation. Moreover, we describe how we adapted a solver for a variant of the TSP, namely the prize-collecting traveling salesman problem, to optimize the route planning dynamically.}, language = {en} } @unpublished{BorgFrikelJorgensenetal., author = {Borg, Leise and Frikel, J{\"u}rgen and J{\o}rgensen, Jakob Sauer and Quinto, Eric Todd}, title = {Theorems that Characterize Artifacts for Arbitrary Limited X-ray CT Data}, edition = {version 6}, abstract = {This article provides a mathematical classification of artifacts from arbitrary incomplete X-ray tomography data when using the classical filtered backprojection algorithm. Using microlocal analysis, we prove that all artifacts arise from points at the boundary of the data set. Our results show that, depending on the geometry of the data set boundary, two types of artifacts can arise: object-dependent and object-independent artifacts. The object-dependent artifacts are generated by singularities of the object being scanned and these artifacts can extend all along lines. This is a generalization of the streak artifacts observed in limited angle CT. The article also characterizes two new phenomena: the object-independent artifacts are caused only by the geometry of the data set boundary; they occur along lines if the boundary of the data set is not smooth and along curves if the boundary of the data set is smooth. In addition to the geometric description of artifacts, the article also provides characterizations of their strength in Sobolev scale in certain cases. Moreover, numerical reconstructions from simulated and real data are presented illustrating our theorems. This work is motivated by a reconstruction we present from a synchrotron data set in which artifacts along lines appeared that were independent of the object. The results of this article apply to a wide range of well-known incomplete data problems, including limited angle CT and region of interest tomography, as well as to unconventional x-ray CT imaging setups. Some of those problems are explicitly addressed in this article, theoretically and numerically.}, language = {en} } @article{WalterSchmidtKoerkel, author = {Walter, Sebastian F. and Schmidt, Andreas and K{\"o}rkel, Stefan}, title = {Adjoint-based optimization of experimental designs with many control variables}, series = {Journal of Process Control}, volume = {24}, journal = {Journal of Process Control}, number = {10}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0959-1524}, doi = {10.1016/j.jprocont.2014.06.019}, pages = {1504 -- 1515}, abstract = {We propose a method for an efficient optimization of experimental designs, using a combination of discrete adjoint computations, Taylor arithmetic and matrix calculus.
Compared to the state of the art of using finite differences or the forward mode of automatic differentiation, our proposed approach leads to a reduction of the relative temporal complexity from linear to constant time in the number of control variables and measurement weights. We demonstrate that the advantageous complexity results are not only of theoretical nature, but lead to significant speedups in practice as well. With our implementation we are very close to the theoretical bound of the cheap gradient principle. We present one academic (spatially discretized heat equation) and two industrial application examples (biochemical process/Diesel-oxidation catalysis process) where we achieve speedups that range between 10 and 100. In addition to our core results, we also describe an efficient adjoint approach for the treatment of differential algebraic equations and present adjoint formulas for constrained least-squares problems.}, language = {en} } @inproceedings{BorgJorgensenFrikeletal., author = {Borg, Leise and J{\o}rgensen, Jakob Sauer and Frikel, J{\"u}rgen and Quinto, Eric Todd and Sporring, Jon}, title = {Reducing artifacts from varying projection truncations}, series = {3rd International Conference on Tomography of Materials and Structures, Lund, Sweden, 26-30 June 2017, ICTMS2017-65-1}, booktitle = {3rd International Conference on Tomography of Materials and Structures, Lund, Sweden, 26-30 June 2017, ICTMS2017-65-1}, abstract = {We study samples with full and partial occlusion causing streak artifacts, and propose two modifications of filtered backprojection for artifact removal. Data is obtained by the SPring-8 synchrotron using a monochromatic parallel-beam scan [1]. Thresholding in the sinogram segments the metal, resulting in edges on which we apply 1) a smooth transition, or 2) a Dirichlet boundary condition.}, language = {en} } @inproceedings{FrikelQuinto, author = {Frikel, J{\"u}rgen and Quinto, Eric Todd}, title = {Artifacts in limited view tomography}, series = {Oberwolfach Reports}, volume = {11}, booktitle = {Oberwolfach Reports}, number = {3}, editor = {Burger, Martin and Quinto, Eric Todd and Louis, Alfred K.}, doi = {10.4171/OWR/2014/37}, pages = {2047 -- 2114}, language = {en} } @incollection{BaumannHusseinMeyerSickendiek, author = {Baumann, Timo and Hussein, Hussein and Meyer-Sickendiek, Burkhard}, title = {Free Verse Prosodies: Identifying and Classifying Spoken Poetry Using Literary and Computational Perspectives (Rhythmicalizer)}, series = {Mixing Methods: Practical Insights from the Humanities in the Digital Age}, booktitle = {Mixing Methods: Practical Insights from the Humanities in the Digital Age}, editor = {Schneider, Birgit and L{\"o}ffler, Beate and Mager, Tino and Hein, Carola}, publisher = {Bielefeld University Press}, address = {Bielefeld}, doi = {10.1515/9783839469132-018}, pages = {167 -- 186}, abstract = {At least 80\% of modern and postmodern poems exhibit neither rhyme nor metrical schemes such as iamb or trochee. However, does this mean that they are free of any rhythmical features? The US American research on free verse prosody claims the opposite: Modern poets like Whitman, the Imagists, the Beat poets and contemporary Slam poets have developed a postmetrical idea of prosody, using rhythmical features of everyday language, prose, and musical styles like Jazz or Hip Hop. It has spawned a large and complex variety in their poetic prosodies which, however, appear to be much harder to quantify and regularize than traditional patterns.
In our project, we examine the largest portal for spoken poetry, Lyrikline, and analyse and classify such rhythmical patterns using pattern recognition and classification techniques. We integrate a human-in-the-loop approach in which we interleave manual annotation with computational modelling and data-based analysis. Our results are integrated into the website of Lyrikline. Our follow-up project makes our research results available to a wider audience, in particular to high school-level teaching.}, language = {en} } @inproceedings{HuangMelzerDuennweber, author = {Huang, Wenfei and Melzer, Matthias and D{\"u}nnweber, Jan}, title = {Optimizing Smart Retail by Experiment Using an Online AI Model Exploration Interface}, series = {2024 Winter Simulation Conference (WSC), 15-18 December 2024, Orlando, FL, USA}, booktitle = {2024 Winter Simulation Conference (WSC), 15-18 December 2024, Orlando, FL, USA}, publisher = {IEEE}, doi = {10.1109/WSC63780.2024.10838897}, pages = {2631 -- 2642}, abstract = {Smart retail technologies save grocery store operators a lot of work. At the same time, these technologies produce valuable data for building sustainable and economical inventory management strategies. AI models can be trained for sales forecasting using the data. The forecasts support the provisioning of fresh food over the whole week and help reduce food waste. In this paper, we present a Web portal which we developed to allow grocery store operators to experiment with AI models that reveal interrelations between observed and anticipated customer behavior. Clickable diagrams facilitate the exploration of data sets combining historical data and synthetically generated data. Pricing and ordering can be adapted according to the simulated forecasts. By means of a case study, we show that our simulations are not only useful for predicting future sales but for other smart retail tasks as well.}, language = {en} } @inproceedings{HoessParadisKazmanetal., author = {Hoess, Nicole and Paradis, Carlos and Kazman, Rick and Mauerer, Wolfgang}, title = {Does the Tool Matter? Exploring Some Causes of Threats to Validity in Mining Software Repositories}, series = {2025 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER), Montreal, QC, Canada, March, 4-7, 2025}, booktitle = {2025 IEEE International Conference on Software Analysis, Evolution and Reengineering (SANER), Montreal, QC, Canada, March, 4-7, 2025}, publisher = {IEEE}, isbn = {979-8-3315-3510-0}, doi = {10.1109/SANER64311.2025.00067}, pages = {645 -- 656}, abstract = {Software repositories are an essential source of information for software engineering research on topics such as project evolution and developer collaboration. Appropriate mining tools and analysis pipelines are therefore an indispensable precondition for many research activities. Ideally, valid results should not depend on technical details of data collection and processing. It is, however, widely acknowledged that mining pipelines are complex, with a multitude of implementation decisions made by tool authors based on their interests and assumptions. This raises the questions if (and to what extent) tools agree on their results and are interchangeable. In this study, we use two tools to extract and analyse ten large software projects, quantitatively and qualitatively comparing results and derived data to better understand this concern.
We analyse discrepancies from a technical point of view, and adjust code and parametrisation to minimise replication differences. Our results indicate that despite similar trends, even simple metrics such as the numbers of commits and developers may differ by up to 500\%. We find that such substantial differences are often caused by minor technical details. We show how tool-level and data post-processing changes can overcome these issues, but find they may require considerable efforts. We summarise identified causes in our lessons learned to help researchers and practitioners avoid common pitfalls, and reflect on implementation decisions and their influence in ensuring obtained data meets explicit and implicit expectations. Our findings lead us to hypothesise that similar uncertainties exist in other analysis tools, which may limit the validity of conclusions drawn in tool-centric research.}, language = {en} } @inproceedings{BaumannBussAttereretal., author = {Baumann, Timo and Buß, Okko and Atterer, Michaela and Schlangen, David}, title = {Evaluating the Potential Utility of ASR N-Best Lists for Incremental Spoken Dialogue Systems}, series = {Proceedings of Interspeech 2009 : speech and intelligence ; 6 - 10 September, 2009, Brighton, UK}, booktitle = {Proceedings of Interspeech 2009 : speech and intelligence ; 6 - 10 September, 2009, Brighton, UK}, publisher = {ISCA}, address = {Brighton, UK}, doi = {10.21437/Interspeech.2009-318}, pages = {1031 -- 1034}, abstract = {The potential of using ASR n-best lists for dialogue systems has often been recognised (if less often realised): it is often the case that even when the top-ranked hypothesis is erroneous, a better one can be found at a lower rank. In this paper, we describe metrics for evaluating whether the same potential carries over to incremental dialogue systems, where ASR output is consumed and reacted upon while speech is still ongoing. We show that even small N can provide an advantage for semantic processing, at a cost of a computational overhead.}, language = {en} } @inproceedings{BurgerSarkarKirschetal., author = {Burger, David and Sarkar, Amitrajit and Kirsch, Konstantin and D{\"u}nnweber, Jan}, title = {Combining Fill-Level Sensing with Route Optimization for a More Efficient Waste Collection}, series = {ECDG 2018 - European Conference on Digital Government, 2018, Santiago De Compostela, Spain}, booktitle = {ECDG 2018 - European Conference on Digital Government, 2018, Santiago De Compostela, Spain}, editor = {Bouzas Lorenzo, Ram{\´o}n}, isbn = {9781912764037}, pages = {24 -- 31}, abstract = {We tackle the problem that collecting all the waste of modern cities within the scheduled time spans becomes increasingly challenging, while the waste collection team is not supplemented accordingly in many places, e.g. by more staff or vehicles. Separating waste has become natural for urban populations, as recycling is ecologically necessary. However, the separation also leads to a continuously increasing number of containers which are logistically unmanageable without computer-aided collection scheduling. Regensburg recently introduced a new program for the collection of biological waste, which extends the private collection of such waste by a few hundred public biological waste containers. Computer support was pretty basic so far, i.e. the collection times were recorded and the routes were sporadically rescheduled manually in an Excel sheet.
We show that much shorter waste collection routes can be found automatically using a dynamic version of the classic Ant Colony Optimization (ACO) algorithm for shortening the collection routes. Moreover, we sketch an IoT (Internet-of-Things) approach to "Intelligent Waste Containers" which we equip with ultrasonic sensors monitoring the fill levels, which allows us to skip poorly used containers during the collection and reposition them on demand. By means of a computer simulation with online visualization, we illustrate that our optimized and dynamically adapted collection routes lead to significant time savings. Indeed, more than twice the number of currently used containers can be collected within the same time. We also report on the first experiments that we conducted with real sensors. The presented work is the result of a cooperation between four contributing parties: 1) Technical University of Regensburg (OTH); 2) Ara Institute of Canterbury; 3) Regensburg's Governmental Department of Waste Management; and 4) an industry partner (kpit.com). We compare the outcome of our project with related work and we sketch some future perspectives and ideas for transforming Regensburg into an even more automated, environmentally friendly "smart city".}, language = {en} } @article{SchedlbauerRaptisLudwig, author = {Schedlbauer, J{\"u}rgen and Raptis, Georgios and Ludwig, Bernd}, title = {Medical informatics labor market analysis using web crawling, web scraping, and text mining}, series = {International Journal of Medical Informatics}, volume = {150}, journal = {International Journal of Medical Informatics}, number = {June}, publisher = {Elsevier}, issn = {1386-5056}, doi = {10.1016/j.ijmedinf.2021.104453}, abstract = {Objectives The European University Association (EUA) defines "employability" as a major goal of higher education. Therefore, competence-based orientation is an important aspect of education. The representation of a standardized job profile in the field of medical informatics, which is based on the most common labor market requirements, is fundamental for identifying and conveying the learning goals corresponding to these competences. Methods To identify the most common requirements, we extracted 544 job advertisements from the German job portal, STEPSTONE. This process was conducted via a program we developed in R with the "rvest" library, utilizing web crawling, web extraction, and text mining. After removing duplicates and filtering for jobs that required a bachelor's degree, 147 job advertisements remained, from which we extracted qualification terms. We categorized the terms into six groups: professional expertise, soft skills, teamwork, processes, learning, and problem-solving abilities. Results The results showed that only 45\% of the terms are related to professional expertise, while 55\% are related to soft skills. Studies of employee soft skills have shown similar results. The most prevalent terms were programming, experience, project, and server. Our second major finding is the importance of experience, further underlining how essential practical skills are. Conclusions Previous studies used surveys and narrative descriptions. This is the first study to use web crawling, web extraction, and text mining. Our research shows that soft skills and specialist knowledge carry equal weight.
The insights gained from this study may be of assistance in developing curricula for medical informatics.}, language = {en} } @inproceedings{DuennweberSarkarPuthiyadathetal., author = {D{\"u}nnweber, Jan and Sarkar, Amitrajit and Puthiyadath, Vimal Kumar and Barde, Omkar}, title = {A Tale of Four Cities - Improving Bus and Waste Collection Schedules in Practical Smart City Applications}, series = {Computer Information Systems and Industrial Management}, booktitle = {Computer Information Systems and Industrial Management}, editor = {Saeed, Khalid and Dvorsk{\´y}, Jiř{\´i}}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-84340-3}, doi = {10.1007/978-3-030-84340-3_30}, pages = {369 -- 380}, abstract = {Computer-based improvements of waste collection and public transport procedures are often a part of smart city initiatives. When we envision an ideal bus network, it will primarily connect the most crowded bus stops. Similarly, an ideal waste collection vehicle will arrive at every container exactly at the time when it is fully loaded. Beyond doubt, this will reduce traffic and support environmentally friendly intentions like waste separation, as it will make more containers manageable. A difficulty of putting that vision into practice is that vehicles cannot always be where they are needed. Knowing the best time for arriving at a position is not sufficient for finding the optimal route. Therefore, we compare four different approaches to optimized routing, deployed in Regensburg, Christchurch, Malaysia, and Bangalore. Our analysis shows that the best schedules result from adapting field-tested routes frequently based on sensor measurements and route optimizing computations.}, language = {en} } @inproceedings{StadlerSarkarDuennweber, author = {Stadler, Timo and Sarkar, Amitrajit and D{\"u}nnweber, Jan}, title = {Bus Demand Forecasting for Rural Areas Using XGBoost and Random Forest Algorithm}, series = {CISIM2021: 20th International Conference on Computer Information Systems and Industrial Management Applications, September 24-26 2021, Ełk, Poland}, booktitle = {CISIM2021: 20th International Conference on Computer Information Systems and Industrial Management Applications, September 24-26 2021, Ełk, Poland}, editor = {Saeed, Khalid and Dvorsk{\´y}, Jiř{\´i}}, publisher = {Springer}, address = {Cham}, isbn = {978-3-030-84340-3}, doi = {10.1007/978-3-030-84340-3_36}, pages = {442 -- 453}, abstract = {In recent years, mobility solutions have experienced a significant upswing. Consequently, the importance of forecasting the number of passengers and determining the associated demand for vehicles has increased. We analyze all bus routes in a rural area in contrast to other work that predicts just a single bus route. Some differences in bus routes in rural areas compared to cities are highlighted and substantiated by case study data from Roding, a town in the rural district of Cham in northern Bavaria. Based on the collected data, we selected a random forest model that lets us determine passenger demand, bus line effectiveness, and general user behavior. The prediction accuracy of the selected model is currently 87\%.
The collected data helps to build new mobility-as-a-service solutions, such as on-call buses or dynamic route optimizations, as we show with our simulation.}, subject = {{\"O}ffentlicher Personennahverkehr}, language = {en} } @inproceedings{SarkarTraubinger, author = {Sarkar, Amitrajit and Traubinger, Thomas}, title = {IS Resilience Decision Priorities at German SMEs}, series = {27th American Conference on Information Systems 2021 (AMCIS 2021): Digital Innovation and Entrepreneurship, Virtual Conference, August 9-13, 2021}, booktitle = {27th American Conference on Information Systems 2021 (AMCIS 2021): Digital Innovation and Entrepreneurship, Virtual Conference, August 9-13, 2021}, abstract = {IS resilience addresses the challenge of securing organizational information systems against disruptions to ensure the continuation of daily business processes and to guarantee the resilience of the organization as a whole. In this paper, we adopt Agency Theory and Weill's IT Governance framework to investigate the decision priorities of senior executives in the context of IS resilience planning, which falls under the broader umbrella of IT governance. The management of IS resilience involves different fields of action, types, and interests which decision-makers within organizations must confront. This is of special importance for small- to medium-sized companies (SMEs), where IS is often not a dedicated corporate function, or only a small one. This paper identifies distinctive character types of corporate decision-makers at SMEs in a European context and their priorities in IS resilience. The study was performed using Q-methodology. It was able to identify and describe three additional types of decision-makers in comparison to previous research, thereby clearly showing that the field has progressed. These character types provide an informative insight into the priorities of IS resilience planning in SMEs and show differences and commonalities between decision-makers.}, language = {en} } @incollection{SchneiderSonarWeber, author = {Schneider, Diana and Sonar, Arne and Weber, Karsten}, title = {Zwischen Automatisierung und ethischem Anspruch - Disruptive Effekte des KI-Einsatzes in und auf Professionen der Gesundheitsversorgung}, series = {K{\"u}nstliche Intelligenz im Gesundheitswesen}, booktitle = {K{\"u}nstliche Intelligenz im Gesundheitswesen}, publisher = {Springer}, address = {Wiesbaden}, doi = {10.1007/978-3-658-33597-7_14}, pages = {325 -- 348}, abstract = {K{\"u}nstliche Intelligenz (KI) und algorithmische Systeme der Entscheidungsfindung entwickeln sich rapide zu wichtigen neuen Technologien in unserem t{\"a}glichen Leben und es kann kaum Zweifel geben, dass sie einen großen Einfluss auf die Gesellschaft haben - auch in Professionen der Gesundheitsversorgung. Aufgrund ihrer Wirkkraft auf vorhandene {\"o}konomische und organisatorische Strukturen werden diesen Technologien gleichzeitig disruptive Effekte auf ihre jeweiligen Anwendungsdom{\"a}nen zugesprochen.
Im vorliegenden Beitrag wird an den Beispielen der klinischen Medizin sowie der Einzelfallhilfe der Sozialen Arbeit dargelegt, in welcher Weise KI professionelle Praxen in der Gesundheitsversorgung ver{\"a}ndert, welche Konsequenzen daraus f{\"u}r die Professionen erwachsen und welche Handlungsaufforderungen sich f{\"u}r die jeweiligen Professionen ergeben.}, language = {de} } @misc{WeberSonarGerhardsetal., author = {Weber, Karsten and Sonar, Arne and Gerhards, Helene and Bittner, Uta and Krug, Henriette and Reuter-Oppermann, Melanie and Pumplun, Luisa and M{\"u}ller, Helene and Buxmann, Peter and Schneider, Diana and Zoglauer, Thomas}, title = {K{\"u}nstliche Intelligenz und Gesundheit}, editor = {Sonar, Arne and Weber, Karsten}, publisher = {Franz Steiner}, address = {Stuttgart}, isbn = {978-3-515-12977-0}, doi = {10.25162/9783515129770}, abstract = {Der Einsatz von k{\"u}nstlicher Intelligenz im Gesundheitsbereich verspricht besonders großen Nutzen durch eine bessere Versorgung sowie effizientere Abl{\"a}ufe und bietet damit letztlich auch {\"o}konomische Vorteile. Dem stehen unter anderem Bef{\"u}rchtungen entgegen, dass sich durch den Einsatz von k{\"u}nstlicher Intelligenz das Arzt-Patienten-Verh{\"a}ltnis ver{\"a}ndern k{\"o}nnte, Arbeitspl{\"a}tze gef{\"a}hrdet seien oder die {\"O}konomisierung des Gesundheitswesens einen weiteren Schub erfahren k{\"o}nnte. Zuweilen wird die Debatte um diese Technologie, zumal in der {\"O}ffentlichkeit, emotional und fern sachlicher Argumente gef{\"u}hrt. Die Autorinnen und Autoren untersuchen die Geschichte des KI-Einsatzes in der Medizin, deren {\"o}ffentliche Wahrnehmung, Governance der KI, die M{\"o}glichkeiten und Grenzen der Technik sowie Einsatzgebiete, die bisher noch nicht oder nur wenig im Fokus der Aufmerksamkeit waren. Dabei erweist sich die KI als leistungsf{\"a}higes Werkzeug, das zahlreiche ethische und soziale Fragen aufwirft, die bei der Einf{\"u}hrung anderer Technologien bereits gestellt wurden; allerdings gibt es auch neue Herausforderungen, denen sich Professionen, Politik und Gesellschaft stellen m{\"u}ssen.}, language = {de} } @inproceedings{BaumannAttererSchlangen, author = {Baumann, Timo and Atterer, Michaela and Schlangen, David}, title = {Assessing and Improving the Performance of Speech Recognition for Incremental Systems}, series = {NAACL '09: Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, May 31 - June 5, 2009, Boulder, Colorado, USA}, booktitle = {NAACL '09: Proceedings of Human Language Technologies: The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, May 31 - June 5, 2009, Boulder, Colorado, USA}, publisher = {Association for Computational Linguistics}, isbn = {978-1-932432-41-1}, pages = {380 -- 388}, abstract = {In incremental spoken dialogue systems, partial hypotheses about what was said are required even while the utterance is still ongoing. We define measures for evaluating the quality of incremental ASR components with respect to the relative correctness of the partial hypotheses compared to hypotheses that can optimize over the complete input, the timing of hypothesis formation relative to the portion of the input they are about, and hypothesis stability, defined as the number of times they are revised.
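The following toy Python sketch illustrates the flavour of such measures by counting spurious edits across a sequence of partial hypotheses. The hypothesis sequence and the exact counting rule are invented for illustration and do not reproduce the paper's definitions.

    partials = ["one", "one two", "one too", "one two", "one two three"]
    final = "one two three"

    def diff_edits(prev, cur):
        # count words revoked plus words added between consecutive hypotheses
        p, c = prev.split(), cur.split()
        common = sum(1 for a, b in zip(p, c) if a == b)
        return (len(p) - common) + (len(c) - common)

    edits = sum(diff_edits(a, b) for a, b in zip(partials, partials[1:]))
    necessary = len(final.split())  # a perfectly stable recogniser only ever adds words
    print("edits:", edits, "necessary:", necessary,
          "spurious fraction:", round(1 - necessary / edits, 2))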
We show that simple incremental post-processing can improve stability dramatically, at the cost of timeliness (from 90\% of edits of hypotheses being spurious down to 10\% at a lag of 320 ms). The measures are not independent, and we show how system designers can find a desired operating point for their ASR. To our knowledge, we are the first to suggest and examine a variety of measures for assessing incremental ASR and improve performance on this basis.}, language = {en} } @inproceedings{BaumannBussSchlangen, author = {Baumann, Timo and Buß, Okko and Schlangen, David}, title = {InproTK in Action: Open-Source Software for Building German-Speaking Incremental Spoken Dialogue Systems}, series = {Electronic speech signal processing 2010 : proceedings of the 21st conference, Berlin, 8 - 10 September 2010}, booktitle = {Electronic speech signal processing 2010 : proceedings of the 21st conference, Berlin, 8 - 10 September 2010}, publisher = {TUDpress}, address = {Berlin, Germany}, pages = {204 -- 211}, abstract = {We present INPROTK, a toolkit for building incremental spoken dialogue systems. Incremental spoken dialogue systems (systems that may react while the user's utterance is ongoing) are a fairly recent research topic and allow for exciting new features. Even though toolkits exist that help in building conventional dialogue systems, INPROTK offers both a tested architecture for building incremental SDSs and many of the building blocks necessary when building such systems. With INPROTK a researcher can avoid many of the technical difficulties, which hopefully further fosters research in this area.}, language = {en} } @inproceedings{AttererBaumannSchlangen, author = {Atterer, Michaela and Baumann, Timo and Schlangen, David}, title = {No Sooner Said Than Done: Testing the Incrementality of Semantic Interpretations of Spontaneous Speech}, series = {Proceedings of Interspeech 2009 : 6 - 10 September 2009, Brighton, U.K.}, booktitle = {Proceedings of Interspeech 2009 : 6 - 10 September 2009, Brighton, U.K.}, publisher = {International Speech Communication Association}, address = {Brighton, UK}, doi = {10.21437/Interspeech.2009-539}, pages = {1855 -- 1858}, abstract = {Ideally, a spoken dialogue system should react without much delay to a user's utterance. Such a system would already select an object, for instance, before the user has finished her utterance about moving this particular object to a particular place. A prerequisite for such a prompt reaction is that semantic representations are built up on the fly and passed on to other modules. Few approaches to incremental semantics construction exist, and, to our knowledge, none of those has been systematically tested on a spontaneous speech corpus.
In this paper, we develop measures to test empirically, on transcribed spontaneous speech, to what extent we can create semantic interpretations on the fly with an incremental semantic chunker that builds a frame semantics.}, language = {en} } @inproceedings{BaumannSchlangen, author = {Baumann, Timo and Schlangen, David}, title = {Predicting the Micro-Timing of User Input for an Incremental Spoken Dialogue System that Completes a User's Ongoing Turn}, series = {SIGDIAL 2011 Conference, 12th annual meeting of the Special Interest Group on Discourse and Dialogue, Co-located with ACL HLT 2011, Portland, Oregon, 17 - 18 June 2011}, booktitle = {SIGDIAL 2011 Conference, 12th annual meeting of the Special Interest Group on Discourse and Dialogue, Co-located with ACL HLT 2011, Portland, Oregon, 17 - 18 June 2011}, publisher = {Curran}, address = {Red Hook, NY}, isbn = {978-1-61839-242-8}, pages = {120 -- 129}, abstract = {We present the novel task of predicting temporal features of continuations of user input, while that input is still ongoing. We show that the remaining duration of an ongoing word, as well as the duration of the next, can be predicted reasonably well, and we put this information to use in a system that synchronously completes a user's speech. While we focus on collaborative completions, the techniques presented here may also be useful for the alignment of back-channels and immediate turn-taking in an incremental SDS, or to synchronously monitor the user's speech fluency for other reasons.}, language = {en} } @inproceedings{BuschmeierBaumannDorschetal., author = {Buschmeier, Hendrik and Baumann, Timo and Dorsch, Benjamin and Kopp, Stefan and Schlangen, David}, title = {Combining Incremental Language Generation and Incremental Speech Synthesis for Adaptive Information Presentation}, series = {SIGDIAL '12: Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Seoul, South Korea, July 5 - 6, 2012}, booktitle = {SIGDIAL '12: Proceedings of the 13th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Seoul, South Korea, July 5 - 6, 2012}, publisher = {Association for Computational Linguistics}, doi = {10.5555/2392800.2392852}, pages = {295 -- 303}, abstract = {Participants in a conversation are normally receptive to their surroundings and their interlocutors, even while they are speaking, and can, if necessary, adapt their ongoing utterance. Typical dialogue systems are not receptive and cannot adapt while uttering. We present combinable components for incremental natural language generation and incremental speech synthesis and demonstrate the flexibility they can achieve with an example system that adapts to a listener's acoustic understanding problems by pausing, repeating and possibly rephrasing problematic parts of an utterance.
In an evaluation, this system was rated as significantly more natural than two systems representing the current state of the art that either ignore the interrupting event or just pause; it also has a lower response time.}, language = {en} } @inproceedings{HeintzeBaumannSchlangen, author = {Heintze, Silvan and Baumann, Timo and Schlangen, David}, title = {Comparing Local and Sequential Models for Statistical Incremental Natural Language Understanding}, series = {SIGDIAL '10: Proceedings of the 11th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Tokyo, Japan, September 24 - 25, 2010}, booktitle = {SIGDIAL '10: Proceedings of the 11th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Tokyo, Japan, September 24 - 25, 2010}, publisher = {Association for Computational Linguistics}, address = {Tokyo, Japan}, isbn = {978-1-932432-85-5}, doi = {10.5555/1944506.1944508}, abstract = {Incremental natural language understanding is the task of assigning semantic representations to successively larger prefixes of utterances. We compare two types of statistical models for this task: a) local models, which predict a single class for an input; and b) sequential models, which align a sequence of classes to a sequence of input tokens. We show that, with some modifications, the first type of model can be improved and made to approximate the output of the second, even though the latter is more informative. We show on two different data sets that both types of model achieve comparable performance (significantly better than a baseline), with the first type requiring simpler training data. Results for the first type of model have been reported in the literature; we show that for our kind of data our more sophisticated variant of the model performs better.}, language = {en} } @article{BaumannBussSchlangen, author = {Baumann, Timo and Buß, Okko and Schlangen, David}, title = {Evaluation and Optimisation of Incremental Processors}, series = {Dialogue \& Discourse}, volume = {2}, journal = {Dialogue \& Discourse}, number = {1}, publisher = {OJS}, doi = {10.5087/dad.2011.106}, pages = {113 -- 141}, abstract = {Incremental spoken dialogue systems, which process user input as it unfolds, pose additional engineering challenges compared to more standard non-incremental systems: Their processing components must be able to accept partial, and possibly subsequently revised input, and must produce output that is at the same time as accurate as possible and delivered with as little delay as possible. In this article, we define metrics that measure how well a given processor meets these challenges, and we identify types of gold standards for evaluation. We exemplify these metrics in the evaluation of several incremental processors that we have developed. We also present generic means to optimise some of the measures, if certain trade-offs are accepted. We believe that this work will help enable principled comparison of components for incremental dialogue systems and portability of results.}, language = {en} } @incollection{BaumannSchlangen, author = {Baumann, Timo and Schlangen, David}, title = {Interactional Adequacy as a Factor in the Perception of Synthesized Speech}, series = {Proceedings of Speech Synthesis Workshop (SSW8)}, booktitle = {Proceedings of Speech Synthesis Workshop (SSW8)}, address = {Barcelona, Spain}, abstract = {Speaking as part of a conversation is different from reading out aloud.
Speech synthesis systems, however, are typically developed using assumptions (at least implicitly) that are more true of the latter than the former situation. We address one particular aspect, which is the assumption that a fully formulated sentence is available for synthesis. We have built a system that does not make this assumption but rather can synthesize speech given incrementally extended input. In an evaluation experiment, we found that in a dynamic domain where what is talked about changes quickly, subjects rated the output of this system as more 'naturally pronounced' than that of a baseline system that employed standard synthesis, despite the synthesis quality objectively being degraded. Our results highlight the importance of considering a synthesizer's ability to support interactive use-cases when determining the adequacy of synthesized speech.}, language = {en} } @inproceedings{BaumannSchlangen, author = {Baumann, Timo and Schlangen, David}, title = {Open-ended, Extensible System Utterances Are Preferred, Even If They Require Filled Pauses}, series = {14th annual meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL 2013) : Metz, France, 22 - 24 August 2013}, booktitle = {14th annual meeting of the Special Interest Group on Discourse and Dialogue (SIGDIAL 2013) : Metz, France, 22 - 24 August 2013}, publisher = {Curran}, address = {Red Hook, NY}, pages = {280 -- 283}, abstract = {In many environments (e. g. sports commentary), situations incrementally unfold over time and often the future appearance of a relevant event can be predicted, but not in all its details or precise timing. We have built a simulation framework that uses our incremental speech synthesis component to assemble in a timely manner complex commentary utterances. In our evaluation, the resulting output is preferred over that from a baseline system that uses a simpler commenting strategy. Even in cases where the incremental system overcommits temporally and requires a filled pause to wait for the upcoming event, the system is preferred over the baseline.}, language = {en} } @inproceedings{AttererBaumannSchlangen, author = {Atterer, Michaela and Baumann, Timo and Schlangen, David}, title = {Towards Incremental End-of-Utterance Detection in Dialogue Systems}, series = {COLING '08: Proceedings of the 22nd International Conference on Computational Linguistics, Manchester, United Kingdom, August 18 - 22, 2008}, booktitle = {COLING '08: Proceedings of the 22nd International Conference on Computational Linguistics, Manchester, United Kingdom, August 18 - 22, 2008}, publisher = {Association for Computational Linguistics}, isbn = {978-1-905593-44-6}, pages = {11 -- 14}, abstract = {We define the task of incremental or 0-lag utterance segmentation, that is, the task of segmenting an ongoing speech recognition stream into utterance units, and present first results. We use a combination of a hidden event language model, features from an incremental parser, and acoustic / prosodic features to train classifiers on real-world conversational data (from the Switchboard corpus).
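For readers who want the arithmetic behind the F-score quoted in the next sentence, here is a minimal sketch of precision, recall, and F1 for boundary detection on invented gold and predicted labels (not the paper's data or classifier):

    gold = [0, 0, 1, 0, 0, 1, 0, 1]   # 1 = utterance boundary after this word
    pred = [0, 1, 1, 0, 0, 1, 0, 0]

    tp = sum(1 for g, p in zip(gold, pred) if g == 1 and p == 1)
    fp = sum(1 for g, p in zip(gold, pred) if g == 0 and p == 1)
    fn = sum(1 for g, p in zip(gold, pred) if g == 1 and p == 0)
    precision = tp / (tp + fp)   # 2 / 3
    recall = tp / (tp + fn)      # 2 / 3
    f1 = 2 * precision * recall / (precision + recall)
    print(round(precision, 2), round(recall, 2), round(f1, 2))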
The best classifiers reach an F-score of around 56\%, improving over baseline and related work.}, language = {en} } @incollection{BaumannSchlangen, author = {Baumann, Timo and Schlangen, David}, title = {Evaluating Prosodic Processing for Incremental Speech Synthesis}, series = {Proceedings of Interspeech 2012}, booktitle = {Proceedings of Interspeech 2012}, publisher = {International Speech Communication Association}, doi = {10.21437/Interspeech.2012-152}, pages = {438 -- 441}, abstract = {Incremental speech synthesis (iSS) accepts input and produces output in consecutive chunks that only together result in a full utterance. Systems that use iSS thus have the ability to adapt their utterances while they are ongoing. Having available less than the full utterance to plan the acoustic realisation has downsides, however, as global optimisation is not possible anymore. In this paper we present a strategy for incrementalizing the symbolic pre-processing component of speech synthesis and assess the influence of a reduction in "lookahead", i. e. in knowledge about the rest of the utterance, on prosodic quality. We found that high quality incremental output can be achieved even with a lookahead of slightly less than one phrase, allowing for timely system reaction.}, language = {en} } @inproceedings{BaumannSchlangen, author = {Baumann, Timo and Schlangen, David}, title = {Inpro_iSS: A Component for Just-In-Time Incremental Speech Synthesis}, series = {The 13th Conference of the European Chapter of the Association for Computational Linguistics - proceedings of the System Demonstrations, April 23 - 27 2012, Avignon France}, booktitle = {The 13th Conference of the European Chapter of the Association for Computational Linguistics - proceedings of the System Demonstrations, April 23 - 27 2012, Avignon France}, publisher = {Association for Computational Linguistics (ACL)}, abstract = {We present a component for incremental speech synthesis (iSS) and a set of applications that demonstrate its capabilities. This component can be used to increase the responsivity and naturalness of spoken interactive systems. While iSS can show its full strength in systems that generate output incrementally, we also discuss how even otherwise unchanged systems may profit from its capabilities.}, language = {en} } @incollection{CarbonelliFeldererJungetal., author = {Carbonelli, Cecilia and Felderer, Michael and Jung, Matthias and Lobe, Elisabeth and Lochau, Malte and Luber, Sebastian and Mauerer, Wolfgang and Ramler, Rudolf and Sch{\"a}fer, Ina and Schroth, Christoph}, title = {Challenges for Quantum Software Engineering: An Industrial Application Scenario Perspective}, series = {Quantum Software: Aspects of Theory and System Design}, booktitle = {Quantum Software: Aspects of Theory and System Design}, editor = {Exman, Iaakov and Perez-Castillo, Ricardo and Piattini, Mario and Felderer, Michael}, publisher = {Springer-Nature}, isbn = {978-3-031-64135-0}, doi = {10.1007/978-3-031-64136-7_12}, abstract = {Quantum software is becoming a key enabler for applying quantum computing to industrial use cases. This poses challenges to quantum software engineering in providing efficient and effective means to develop such software. Eventually, this must be reliably achieved in time, on budget, and in quality, using sound and well-principled engineering approaches. 
Given that quantum computers are based on fundamentally different principles than classical machines, this raises the question of whether, how, and to what extent established techniques for systematically engineering software need to be adapted. In this chapter, we analyze three paradigmatic application scenarios for quantum software engineering from an industrial perspective. The respective use cases center around (1) optimization and quantum cloud services, (2) quantum simulation, and (3) embedded quantum computing. Our aim is to provide a concise overview of the current and future applications of quantum computing in diverse industrial settings. We derive presumed challenges for quantum software engineering and thus provide research directions for this emerging field.}, language = {en} } @unpublished{AmmermannMauererSchaefer, author = {Ammermann, Joshua and Mauerer, Wolfgang and Sch{\"a}fer, Ina}, title = {Towards View-based Development of Quantum Software}, abstract = {Quantum computing is an interdisciplinary field that relies on the expertise of many different stakeholders. The views of various stakeholders on the subject of quantum computing may differ, thereby complicating communication. To address this, we propose a view-based quantum development approach based on a Single Underlying Model (SUM) and a supporting quantum Integrated Development Environment (IDE). We highlight emerging challenges for future research.}, language = {en} } @techreport{SchultzScharfHaueretal., type = {Working Paper}, author = {Schultz, Maximilian and Scharf, Anna and Hauer, Franziska and Haug, Sonja and Weber, Karsten}, title = {KINiro, K{\"u}nstliche Intelligenz f{\"u}r Nichtregierungsorganisationen - Bedarf, Akzeptanz und Umsetzungsm{\"o}glichkeiten. 2. Arbeitspapier: K{\"u}nstliche Intelligenz in Nichtregierungsorganisationen: Explorative Studie in einem neuen Forschungsfeld}, publisher = {OTH Regensburg}, address = {Regensburg}, doi = {10.13140/RG.2.2.10671.75685/1}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-73118}, pages = {35}, abstract = {Hintergrund und Fragestellung Nichtregierungsorganisationen (NRO) sind ein wichtiger Bestandteil der Zivilgesellschaft und interagieren auch mit Regierungen, Unternehmen und anderen gesellschaftlichen Akteuren. Aufgrund der komplexer werdenden Arbeit von NROs scheint K{\"u}nstliche Intelligenz (KI) M{\"o}glichkeiten zur Bew{\"a}ltigung aktueller und zuk{\"u}nftiger Herausforderungen zu bieten. Jedoch ist wenig {\"u}ber die Arbeit von NROs mit KI bekannt. Methodik Es wurden f{\"u}nf explorative Interviews mit Vertreter*innen von Nichtregierungsorganisationen (NROs) zum Thema Wissen, Akzeptanz, Bedarfe und Risikoeinsch{\"a}tzungen gef{\"u}hrt und ausgewertet. Dabei wurden NROs aus verschiedenen Handlungsfeldern und von unterschiedlicher Gr{\"o}ße interviewt. Es wurden informelle Vorgespr{\"a}che gef{\"u}hrt, um eine erste Orientierung im Forschungsfeld zu generieren. Aus den Erkenntnissen der Vorgespr{\"a}che und den Ergebnissen des Scoping Reviews ist ein Leitfaden erstellt worden, der zur Orientierung f{\"u}r die Expert*inneninterviews dient. Im Anschluss wurden dann f{\"u}nf explorative leitfadengest{\"u}tzte Expert*inneninterviews durchgef{\"u}hrt und qualitativ ausgewertet. Ergebnisse Ein zentraler Befund ist, dass das Thema KI gerade in den NROs ankommt und es noch keine gefestigten Strukturen und Vorstellungen zum Einsatz von KI gibt. KI wird in einzelnen spezifischen Projekten eingesetzt, ohne dass diese umfassend in Arbeitsabl{\"a}ufe integriert ist.
Die Akzeptanz von KI ist generell positiv; die Technologie wird als potenzielle L{\"o}sung f{\"u}r strukturelle Herausforderungen und Unterst{\"u}tzung im Alltag gesehen. Die Nutzung von KI-Anwendungen beschr{\"a}nkt sich jedoch mit Ausnahme von Large Language Models auf Pilotprojekte. Mit j{\"u}ngerem Alter und Technikaffinit{\"a}t ist eine h{\"o}here Akzeptanz verbunden. Besonders kritisch werden Anwendungen von KI im Sozial- oder Gesundheitsbereich als Ersatz f{\"u}r menschliche Interaktionen gesehen. Betont werden auch ethische Bedenken und eine hohe Bedeutsamkeit von Datenschutz. Schlussfolgerung K{\"u}nstliche Intelligenz in Nichtregierungsorganisationen ist ein aufkommendes und sich entwickelndes Forschungsthema. Die Interviews unterstreichen den Bedarf an mehr Wissen, ethischen Richtlinien und finanziellen Ressourcen f{\"u}r eine effektive Nutzung von KI in NROs. Ein umfassendes Verst{\"a}ndnis von KI und eine tiefergehende, systematische Integration in Arbeitsabl{\"a}ufe in diesen Organisationen m{\"u}ssen noch entwickelt werden.}, language = {de} } @techreport{ScharfHauerSchultzetal., type = {Working Paper}, author = {Scharf, Anna and Hauer, Franziska and Schultz, Maximilian and Haug, Sonja and Weber, Karsten}, title = {KINiro, K{\"u}nstliche Intelligenz f{\"u}r Nichtregierungsorganisationen - Bedarf, Akzeptanz und Umsetzungsm{\"o}glichkeiten. 1. Arbeitspapier: K{\"u}nstliche Intelligenz in Nichtregierungsorganisationen: Ein Scoping Review {\"u}ber ein junges Forschungsfeld}, publisher = {OTH Regensburg}, address = {Regensburg}, doi = {10.13140/RG.2.2.21668.69765}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-73090}, pages = {24}, abstract = {Hintergrund und Fragestellung Nichtregierungsorganisationen (NRO) sind ein wichtiger Bestandteil der Zivilgesellschaft und interagieren auch mit Regierungen, Unternehmen und anderen gesellschaftlichen Akteuren. Aufgrund der komplexer werdenden Arbeit von NROs scheint K{\"u}nstliche Intelligenz (KI) M{\"o}glichkeiten zur Bew{\"a}ltigung aktueller und zuk{\"u}nftiger Herausforderungen zu bieten. Jedoch ist wenig {\"u}ber die Arbeit von NROs mit KI bekannt. Daher besch{\"a}ftigt sich das Projekt KINiro in diesem ersten Working Paper mit der Frage, welche (nicht-)wissenschaftlichen Erkenntnisse zum Themenkomplex NROs und KI bereits vorliegen. Methodik Es wurde ein Scoping Review zur Erfassung (nicht-)wissenschaftlicher Texte zu NROs und KI durchgef{\"u}hrt. Die systematische Literaturrecherche wurde in den Datenbanken Web of Science, Science Gate und WISO durchgef{\"u}hrt. In den Review wurden schließlich 14 Titel eingeschlossen und qualitativ analysiert. Ergebnisse Die Mehrheit der gefundenen Treffer sind Pressemitteilungen. Unter den Treffern befinden sich lediglich zwei (wissenschaftliche) Studien. Die NROs setzen sich auf verschiedenen Ebenen mit der Thematik von KI auseinander, wobei sich zwei Herangehensweisen unterscheiden lassen. Einige NROs nehmen am gesellschaftlichen Diskurs {\"u}ber den Einsatz von KI teil und treiben diesen in theoretischer Hinsicht voran, ohne die Technik dabei selbst zu nutzen. Andere NROs integrieren die KI-Systeme praktisch in ihre Arbeitsabl{\"a}ufe oder f{\"u}hren Projekte zum Zweck der NRO mit KI-Unterst{\"u}tzung durch. F{\"u}r die Entwicklung von KI-Anwendungen wird mit For-Profit-Unternehmen kooperiert und die Expertise der Unternehmen mit Daten der NROs kombiniert. Durch den Einsatz von KI erhoffen sich NROs einen gezielteren Einsatz von Ressourcen.
Hierbei zeigt sich, dass f{\"u}r die Nutzung in KI-Systemen ein interdisziplin{\"a}rer Konsens {\"u}ber Standards in Datenerhebung und Speicherung als notwendig angesehen wird. Schlussfolgerung Aus der geringen Anzahl an gefundenen Texten, insbesondere (wissenschaftlichen) Studien, und dem Ver{\"o}ffentlichungszeitraum, der in den vergangenen sieben Jahren liegt, l{\"a}sst sich schließen, dass es sich um einen jungen Forschungsbereich handelt. Die ausgeschlossenen Titel zeigen auf, dass NROs aktuell noch h{\"a}ufiger mit der Digitalisierung allgemein besch{\"a}ftigt sind und die Auseinandersetzung mit KI noch am Anfang steht.}, language = {de} } @inproceedings{HauerSchultzHaugetal., author = {Hauer, Franziska and Schultz, Maximilian and Haug, Sonja and Weber, Karsten}, title = {Acceptance and Usage of AI Applications in Health-Focused NGOs}, series = {dHealth 2025, Proceedings of the 19th Health Informatics Meets Digital Health Conference}, booktitle = {dHealth 2025, Proceedings of the 19th Health Informatics Meets Digital Health Conference}, editor = {Baumgartner, Martin and Hayn, Dieter and Pfeifer, Bernhard and Schreier, G{\"u}nter}, publisher = {IOS Press}, address = {Wien}, isbn = {978-1-64368-592-2}, doi = {10.3233/SHTI250172}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-80613}, pages = {123 -- 128}, abstract = {Background: AI applications promise to be a valuable tool for health-focused NGOs. While often operating with limited resources, these organizations recognize the potential of AI to streamline processes and support workflows through automation. However, challenges such as data privacy concerns (especially regarding personal medical data) and the low prioritization of implementation hinder progress. Objectives: This study examines the extent to which German health-focused NGOs can currently benefit from the application of AI. It evaluates demands, available resources, and use cases. Methods: A health-focused subset of data from project KINiro was analyzed. This research utilized a mixed-methods approach, combining two rounds of qualitative interviews with a quantitative survey. Results: The study revealed that while health-focused NGOs are in the early stages of AI implementation, there are hurdles such as ethical concerns and a lack of resources. Conclusion: AI has the potential to support health-focused NGOs in their work if challenges such as resources, ethics, and data privacy are effectively addressed.}, language = {en} } @article{Schoenberger, author = {Sch{\"o}nberger, Manuel}, title = {Applicability of Quantum Computing on Database Query Optimization}, series = {SIGMOD '22: proceedings of the 2022 International Conference on Management of Data : June 12-17, 2022, Philadelphia, PA, USA}, journal = {SIGMOD '22: proceedings of the 2022 International Conference on Management of Data : June 12-17, 2022, Philadelphia, PA, USA}, publisher = {ACM}, address = {New York, NY}, doi = {10.1145/3514221.3520257}, pages = {2512 -- 2514}, abstract = {We evaluate the applicability of quantum computing on two fundamental query optimization problems, join order optimization and multi query optimization (MQO). We analyze the problem dimensions that can be solved on current gate-based quantum systems and quantum annealers, the two currently commercially available architectures. First, we evaluate the use of gate-based systems on MQO, previously solved with quantum annealing.
We show that, in contrast to classical computing, moving to a different architecture requires involved adaptations. We moreover propose a multi-step reformulation for join ordering problems to make them solvable on current quantum systems. Finally, we systematically evaluate our contributions for gate-based quantum systems and quantum annealers. Doing so, we identify the scope of current limitations, as well as the future potential of quantum computing technologies for database systems.}, language = {en} } @misc{SchoenbergerScherzingerMauerer, author = {Sch{\"o}nberger, Manuel and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Quantum Computing for DB - Applicability on Multi Query Optimization and Join Order Optimization}, series = {Fr{\"u}hjahrstreffen Fachgruppe Datenbanken in Potsdam, 2022}, journal = {Fr{\"u}hjahrstreffen Fachgruppe Datenbanken in Potsdam, 2022}, language = {en} } @inproceedings{PeldszusBussBaumannetal., author = {Peldszus, Andreas and Buß, Okko and Baumann, Timo and Schlangen, David}, title = {Joint Satisfaction of Syntactic and Pragmatic Constraints Improves Incremental Spoken Language Understanding}, series = {EACL 2012 Joint Workshop of LINGVIS \& UNCLH, Visualization of Linguistic Patterns and Uncovering Language History from Multilingual Resources, Proceedings of the Workshop, April 23 - 24 2012, Avignon France}, booktitle = {EACL 2012 Joint Workshop of LINGVIS \& UNCLH, Visualization of Linguistic Patterns and Uncovering Language History from Multilingual Resources, Proceedings of the Workshop, April 23 - 24 2012, Avignon France}, publisher = {The Association for Computational Linguistics}, isbn = {978-1-937284-19-0}, pages = {514 -- 523}, abstract = {We present a model of semantic processing of spoken language that (a) is robust against ill-formed input, such as can be expected from automatic speech recognisers, (b) respects both syntactic and pragmatic constraints in the computation of most likely interpretations, (c) uses a principled, expressive semantic representation formalism (RMRS) with a well-defined model theory, and (d) works continuously (producing meaning representations on a word-by-word basis, rather than only for full utterances) and incrementally (computing only the additional contribution by the new word, rather than re-computing for the whole utterance-so-far). We show that the joint satisfaction of syntactic and pragmatic constraints improves the performance of the NLU component (around 10 \% absolute, over a syntax-only baseline).}, language = {en} } @inproceedings{BussBaumannSchlangen, author = {Buß, Okko and Baumann, Timo and Schlangen, David}, title = {Collaborating on Utterances with a Spoken Dialogue System Using an ISU-based Approach to Incremental Dialogue Management}, series = {SIGDIAL 10, Proceedings of the 11th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Tokyo Japan September 24 - 25, 2010}, booktitle = {SIGDIAL 10, Proceedings of the 11th Annual Meeting of the Special Interest Group on Discourse and Dialogue, Tokyo Japan September 24 - 25, 2010}, publisher = {Association for Computational Linguistics}, address = {Tokyo, Japan}, isbn = {978-1-932432-85-5}, abstract = {When dialogue systems, through the use of incremental processing, are not bounded anymore by strict, non-overlapping turn-taking, a whole range of additional interactional devices becomes available. We explore the use of one such device, trial intonation.
We elaborate our approach to dialogue management in incremental systems, based on the Information-State-Update approach, and discuss an implementation in a micro-domain that lends itself to the use of immediate feedback, trial intonations and expansions. In an overhearer evaluation, the incremental system was judged as significantly more human-like and reactive than a non-incremental version.}, language = {en} } @inproceedings{SchlangenBaumannAtterer, author = {Schlangen, David and Baumann, Timo and Atterer, Michaela}, title = {Incremental Reference Resolution: The Task, Metrics for Evaluation, and a Bayesian Filtering Model that is Sensitive to Disfluencies}, series = {Proceedings of the SIGDIAL 2009 Conference: The 10th Annual Meeting of the Special Interest Group on Discourse and Dialogue, September 11 - 12, 2009, London United Kingdom}, booktitle = {Proceedings of the SIGDIAL 2009 Conference: The 10th Annual Meeting of the Special Interest Group on Discourse and Dialogue, September 11 - 12, 2009, London United Kingdom}, publisher = {Association for Computational Linguistics}, address = {London, UK}, pages = {30 -- 37}, abstract = {In this paper we do two things: a) we discuss in general terms the task of incremental reference resolution (IRR), in particular resolution of exophoric reference, and specify metrics for measuring the performance of dialogue system components tackling this task, and b) we present a simple Bayesian filtering model of IRR that performs reasonably well just using words directly (no structure information and no hand-coded semantics): it picks the right referent out of 12 for around 50 \% of real-world dialogue utterances in our test corpus. It is also able to learn to interpret not only words but also hesitations, just as humans have been shown to do in similar situations, namely as markers of references to hard-to-describe entities.}, language = {en} } @inproceedings{SchlangenBaumannBuschmeieretal., author = {Schlangen, David and Baumann, Timo and Buschmeier, Hendrik and Buß, Okko and Kopp, Stefan and Skantze, Gabriel and Yaghoubzadeh, Ramin}, title = {Middleware for Incremental Processing in Conversational Agents}, series = {Proceedings of the 11th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SigDial 2010), September 24 - 25, 2010, Tokyo Japan}, booktitle = {Proceedings of the 11th Annual Meeting of the Special Interest Group on Discourse and Dialogue (SigDial 2010), September 24 - 25, 2010, Tokyo Japan}, publisher = {Association for Computational Linguistics}, address = {Tokyo, Japan}, pages = {51 -- 54}, abstract = {We describe work done at three sites on designing conversational agents capable of incremental processing. We focus on the ‘middleware' layer in these systems, which takes care of passing around and maintaining incremental information between the modules of such agents.
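To sketch what "passing around and maintaining incremental information" can amount to in code, the toy Python below models add/revoke updates on a buffer of incremental units, in the general spirit of such middleware; all class and method names are invented and do not mirror any of the implementations described.

    from dataclasses import dataclass, field

    @dataclass
    class IncrementalUnit:
        payload: str
        revoked: bool = False

    @dataclass
    class Buffer:
        units: list = field(default_factory=list)

        def add(self, payload):
            self.units.append(IncrementalUnit(payload))

        def revoke(self):
            # mark the newest live unit as revoked instead of deleting it, so
            # downstream consumers can undo whatever they built on top of it
            for iu in reversed(self.units):
                if not iu.revoked:
                    iu.revoked = True
                    return

    asr_out = Buffer()
    asr_out.add("one")
    asr_out.add("too")
    asr_out.revoke()   # "too" turned out to be a misrecognition
    asr_out.add("two")
    print([iu.payload for iu in asr_out.units if not iu.revoked])  # ['one', 'two']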
All implementations are based on the abstract model of incremental dialogue processing proposed by Schlangen and Skantze (2009), and the paper shows what different instantiations of the model can look like given specific requirements and application areas.}, language = {en} } @incollection{vanWelbergenBaumannSchlangenetal., author = {van Welbergen, Herwin and Baumann, Timo and Schlangen, David and Kopp, Stefan}, title = {Incremental, Adaptive and Interruptive Speech Realization for Fluent Conversation with ECAs}, series = {Intelligent Virtual Agents}, booktitle = {Intelligent Virtual Agents}, publisher = {Springer}, isbn = {978-3-642-40414-6}, pages = {468 -- 469}, abstract = {This project aimed to establish the feasibility of creating a procedural system for generating expressive facial animation based on an affective agent. A procedural system supporting a limited number of emotional expression changes was created alongside keyframed animations of these same emotional expression changes, and audience response to these two approaches was tested empirically. Results seem to partially support that the generated procedural animations are comparable with keyframed ones in terms of perceptual validity.}, language = {en} } @inproceedings{vonderMalsburgBaumannSchlangen, author = {von der Malsburg, Titus and Baumann, Timo and Schlangen, David}, title = {TELIDA: A Package for Manipulation and Visualisation of Timed Linguistic Data}, series = {Proceedings of the SIGDIAL 2009 Conference: The 10th Annual Meeting of the Special Interest Group on Discourse and Dialogue, September 11 - 12, 2009, London United Kingdom}, booktitle = {Proceedings of the SIGDIAL 2009 Conference: The 10th Annual Meeting of the Special Interest Group on Discourse and Dialogue, September 11 - 12, 2009, London United Kingdom}, publisher = {Association for Computational Linguistics}, address = {London, UK}, doi = {10.5555/1708376.1708419}, pages = {302 -- 305}, abstract = {We present a toolkit for manipulating and visualising time-aligned linguistic data such as dialogue transcripts or language processing data. The package complements existing editing tools by allowing for conversion between their formats, information extraction from the raw files, and by adding sophisticated, and easily extended, methods for visualising the dynamics of dialogue processing. To illustrate the versatility of the package, we describe its use in three different projects at our site.}, language = {en} } @inproceedings{SchoenbergerFranzScherzingeretal., author = {Sch{\"o}nberger, Manuel and Franz, Maja and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Peel | Pile? Cross-Framework Portability of Quantum Software}, series = {2022 IEEE 19th International Conference on Software Architecture Companion (ICSA-C), 12-15 March 2022, Honolulu, HI, USA}, booktitle = {2022 IEEE 19th International Conference on Software Architecture Companion (ICSA-C), 12-15 March 2022, Honolulu, HI, USA}, publisher = {IEEE}, doi = {10.1109/ICSA-C54293.2022.00039}, abstract = {In recent years, various vendors have made quantum software frameworks available. Yet with vendor-specific frameworks, code portability seems at risk, especially in a field where hardware and software libraries have not yet reached a consolidated state, and even foundational aspects of the technologies are still in flux. Accordingly, the development of vendor-independent quantum programming languages and frameworks is often suggested.
This follows the established architectural pattern of introducing additional levels of abstraction into software stacks, thereby piling on layers of abstraction. Yet software architecture also provides seemingly less abstract alternatives, namely to focus on hardware-specific formulations of problems that peel off unnecessary layers. In this article, we quantitatively and experimentally explore these strategic alternatives, and compare popular quantum frameworks from the software implementation perspective. We find that for several specific, yet generalisable problems, the mathematical formulation of the problem to be solved is not just sufficiently abstract and serves as a precise description, but is likewise concrete enough to allow for deriving framework-specific implementations with little effort. Additionally, we argue, based on analysing dozens of existing quantum codes, that porting between frameworks is actually low-effort, since the quantum- and framework-specific portions are very manageable in terms of size, commonly in the order of mere hundreds of lines of code. Given the current state of the art in quantum programming practice, this leads us to argue in favour of peeling off unnecessary abstraction levels.}, language = {en} } @unpublished{SchrammPieperVogl, author = {Schramm, Simon and Pieper, Matthias and Vogl, Stefanie}, title = {Orthogonal Procrustes Based Anomaly Detection and Error Prediction for Vehicle Bills of Materials}, series = {SSRN}, journal = {SSRN}, publisher = {Elsevier}, doi = {10.2139/ssrn.4120321}, pages = {21 S.}, abstract = {Industrial Bills of Materials (BOMs) suffer from surging complexity and cause errors in production which have detrimental effects on a product's profitability. Currently, BOM anomalies have to be identified manually, and errors have to be detected in the same way. This preprint describes a combination of data analysis and Machine Learning methods, such as hierarchical and agglomerative clustering, an isolation forest algorithm, association mining and a multi-output Artificial Neural Network, all based on a deterministic distance measure for industrial BOMs. Solving the orthogonal Procrustes problem for complex, multi-level matrices, a distance measure for real-world industrial BOMs was derived. A multi-output MLP was used in order to predict error probabilities with a time reference. Our results show how to detect anomalies and predict errors in a complex, multi-level BOM based on historical, labelled data. While other authors focus on the mere comparison of BOMs, we aimed at a holistic approach, combining descriptive and predictive methods in order to forecast where in a BOM, and at what time of the BOM creation process, errors occur. The resulting prescriptive system was tested using real-world data and has been shown to effectively predict where and when BOM errors are likely to occur. Consequently, the prescriptive system is superior to prior, purely predictive systems; it can help to decrease errors and thereby reduces product development time and cost in real-world companies.}, language = {en} } @inproceedings{AltenbuchnerHaugWeber, author = {Altenbuchner, Amelie and Haug, Sonja and Weber, Karsten}, title = {Exploratory Analysis of Motion Tracking Data in the Rehabilitation Process of Geriatric Trauma Patients}, series = {Studies in Health Technology and Informatics.
Volume 260: dHealth 2019 - From eHealth to dHealth}, booktitle = {Studies in Health Technology and Informatics. Volume 260: dHealth 2019 - From eHealth to dHealth}, editor = {Hayn, Dieter and Eggerth, A. and Schreier, G{\"u}nter}, publisher = {IOS Press}, doi = {10.3233/978-1-61499-971-3-138}, pages = {138 -- 145}, abstract = {Background: This article is based on an ongoing long-term study, in which customary motion trackers measure steps during rehabilitation of geriatric trauma patients (Med=86 years). Objectives: Exploring steps after 28 days of measurement. Finding similarities in the data by running cluster analysis and formulating linear regression models to predict steps through time. Methods: Two types of motion trackers (FitBit Alta HR and Garmin v{\´i}vofit 3) have been used to measure patients' (N=24) steps after hip fracture in two study groups. Cluster analysis detected three clusters for progress in number of steps that were tested for group differences with ANOVA. Regression analysis tested models for individual patients. Results: Three-cluster solutions showed significant differences for the average number of steps after 5, 14, 21 and 28 days. Regression models could predict 71 \% of the individual patients' progress in study group 2. Conclusion: The long-term study will provide more data in the future to examine the three-cluster solution and to find out in what stage of rehabilitation the measurement of the steps could be used to predict individual rehabilitation.}, language = {en} } @unpublished{SchoenbergerTrummerMauerer, author = {Sch{\"o}nberger, Manuel and Trummer, Immanuel and Mauerer, Wolfgang}, title = {Quantum-Inspired Digital Annealing for Join Ordering}, series = {Proceedings of the VLDB Endowment}, journal = {Proceedings of the VLDB Endowment}, pages = {14}, abstract = {Finding the optimal join order (JO) is one of the most important problems in query optimisation, and has been extensively considered in research and practice. As it involves huge search spaces, approximation approaches and heuristics are commonly used, which explore a reduced solution space at the cost of solution quality. To explore even larger JO search spaces, we may consider special-purpose software, such as mixed-integer linear programming (MILP) solvers, which have successfully solved JO problems. However, even mature solvers cannot overcome the limitations of conventional hardware prompted by the end of Moore's law. We consider quantum-inspired digital annealing hardware, which takes inspiration from quantum processing units (QPUs). Unlike QPUs, which will likely remain limited in size and reliability in the near- and mid-term future, the digital annealer (DA) can solve large instances of mathematically encoded optimisation problems today. We derive a novel, native encoding for the JO problem tailored to this class of machines that substantially improves over known MILP and quantum-based encodings, and reduces encoding size over the state of the art. By augmenting the computation with a novel readout method, we derive valid join orders for each solution obtained by the (probabilistically operating) DA.
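As background for the encoding mentioned in this entry: annealers of this kind minimise quadratic forms over binary variables (QUBOs). The toy Python below brute-forces a three-variable QUBO with invented coefficients; the paper's actual join-ordering encoding is far more involved and is not reproduced here.

    import itertools
    import numpy as np

    # invented upper-triangular QUBO coefficients for three binary variables
    Q = np.array([[-1.0, 2.0, 0.0],
                  [ 0.0, -1.0, 2.0],
                  [ 0.0,  0.0, -1.0]])

    best_x, best_e = None, float("inf")
    for bits in itertools.product([0, 1], repeat=3):
        x = np.array(bits)
        e = x @ Q @ x   # QUBO energy of this assignment
        if e < best_e:
            best_x, best_e = x, e
    print(best_x, best_e)   # expected: [1 0 1] with energy -2.0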
Most importantly, and despite an extremely large solution space, our approach scales to practically relevant dimensions of around 50 relations and improves result quality over conventionally employed approaches, adding a novel alternative to solving the long-standing JO problem.}, language = {en} } @unpublished{SchmidlDengMaetal., author = {Schmidl, Sebastian and Deng, Yangshen and Ma, Pingchuan and Sch{\"o}nberger, Manuel and Mauerer, Wolfgang}, title = {Reproducibility Report for ACM SIGMOD 2023 Paper: Ready to Leap (by Co-Design)? Join Order Optimisation}, abstract = {The paper "Ready to Leap (by Co-Design)? Join Order Optimisation on Quantum Hardware" proposes the first approach to solving the problem of join order optimization on quantum hardware. The authors characterize the applicability and limitations of current state-of-the-art quantum hardware, i. e. gate-based quantum computing and quantum annealing, for join ordering and recommend key improvements to the physical hardware to reach practical utility. Based on the provided database queries and QPU system processing data, we have been able to reproduce the original paper's key insights and quantum problem characteristics reported in its experimental section. The authors provided a self-contained and fully automated reproduction package, including data (database queries, statistics, and collected QPU processing data), experiment scripts, and plotting routines that allowed the identical reconstruction of the three main figures in the paper.}, language = {en} } @misc{SchoenbergerScherzingerMauerer, author = {Sch{\"o}nberger, Manuel and Scherzinger, Stefanie and Mauerer, Wolfgang}, title = {Applicability of Quantum Computing on Database Query Optimization}, series = {Fr{\"u}hjahrstreffen Fachgruppe Datenbanken in Potsdam (Poster Presentation)}, journal = {Fr{\"u}hjahrstreffen Fachgruppe Datenbanken in Potsdam (Poster Presentation)}, language = {en} } @inproceedings{BuchmannSchwaegerl, author = {Buchmann, Thomas and Schw{\"a}gerl, Felix}, title = {Ensuring well-formedness of configured domain models in model-driven product lines based on negative variability}, series = {FOSD '12: Proceedings of the 4th International Workshop on Feature-Oriented Software Development, 24. + 25. September 2012, Dresden}, booktitle = {FOSD '12: Proceedings of the 4th International Workshop on Feature-Oriented Software Development, 24. + 25. September 2012, Dresden}, publisher = {ACM Press}, address = {New York, USA}, doi = {10.1145/2377816.2377822}, pages = {37 -- 44}, abstract = {Model-driven development is a well-known practice in modern software engineering. Many tools exist which allow developers to build software in a model-based or even model-driven way, but they do not provide dedicated support for software product line development. Only recently have some approaches combined model-driven engineering and software product line engineering. In this paper we present an approach that allows for combining feature models and Ecore-based domain models and provides extensive support to keep the mapping between the involved models consistent.
Our key contribution is a declarative textual language that allows developers to phrase domain-specific consistency constraints, which are preserved during the configuration process in order to ensure context-sensitive syntactical correctness of derived domain models.}, language = {en} } @phdthesis{ZInner, author = {Zinner, Helge}, title = {Vernetzung heterogener Feldbusse auf Basis des Standards Ethernet Audio Video Bridging}, address = {Ilmenau}, pages = {III, 191 S.}, language = {de} } @article{BeyerWeigertQuicketal., author = {Beyer, Thomas and Weigert, Markus and Quick, Harald H. and Pietrzyk, Uwe and Vogt, Florian and Palm, Christoph and Antoch, Gerald and M{\"u}ller, Stefan P. and Bockisch, Andreas}, title = {MR-based attenuation correction for torso-PET/MR imaging}, series = {European Journal of Nuclear Medicine and Molecular Imaging}, volume = {35}, journal = {European Journal of Nuclear Medicine and Molecular Imaging}, number = {6}, doi = {10.1007/s00259-008-0734-0}, pages = {1142 -- 1146}, abstract = {Purpose MR-based attenuation correction (AC) will become an integral part of combined PET/MR systems. Here, we propose a toolbox to validate MR-AC of clinical PET/MRI data sets. Methods Torso scans of ten patients were acquired on a combined PET/CT and on a 1.5-T MRI system. MR-based attenuation data were derived from the CT following MR-CT image co-registration and subsequent histogram matching. PET images were reconstructed after CT- (PET/CT) and MR-based AC (PET/MRI). Lesion-to-background (L/B) ratios were estimated on PET/CT and PET/MRI. Results MR-CT histogram matching leads to a mean voxel intensity difference in the CT- and MR-based attenuation images of 12\% (max). Mean differences between PET/MRI and PET/CT were 19\% (max). L/B ratios were similar except for the lung, where local misregistration and intensity transformation lead to a biased PET/MRI. Conclusion Our toolbox can be used to study pitfalls in MR-AC. We found that co-registration accuracy and pixel value transformation determine the accuracy of PET/MRI.}, subject = {Kernspintomografie}, language = {en} } @article{HartmannNieberlePalmetal., author = {Hartmann, Robin and Nieberle, Felix and Palm, Christoph and Br{\´e}bant, Vanessa and Prantl, Lukas and Kuehle, Reinald and Reichert, Torsten E. and Taxis, Juergen and Ettl, Tobias}, title = {Utility of Smartphone-based Three-dimensional Surface Imaging for Digital Facial Anthropometry}, series = {JPRAS Open}, volume = {39}, journal = {JPRAS Open}, publisher = {Elsevier}, doi = {10.1016/j.jpra.2024.01.014}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-70348}, pages = {330 -- 343}, abstract = {Background The utilization of three-dimensional (3D) surface imaging for facial anthropometry is a significant asset for patients undergoing maxillofacial surgery. Notably, there have been recent advancements in smartphone technology that enable 3D surface imaging. In this study, anthropometric assessments of the face were performed using a smartphone and a sophisticated 3D surface imaging system. Methods 30 healthy volunteers (15 females and 15 males) were included in the study. An iPhone 14 Pro (Apple Inc., USA) using the application 3D Scanner App (Laan Consulting Corp., USA) and the Vectra M5 (Canfield Scientific, USA) were employed to create 3D surface models. For each participant, 19 anthropometric measurements were conducted on the 3D surface models. Subsequently, the anthropometric measurements generated by the two approaches were compared.
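For illustration of the histogram matching step mentioned in the Beyer et al. entry above, the following sketch maps one sample onto another distribution by quantile matching; the arrays are random stand-ins rather than imaging data, and this is not the authors' toolbox code.

    import numpy as np

    rng = np.random.default_rng(0)
    src = rng.normal(300, 50, 10000)   # stand-in for MR intensities
    ref = rng.normal(40, 400, 10000)   # stand-in for CT values

    # sort-based quantile mapping: the k-th smallest source value is mapped
    # onto the k-th smallest reference value (interpolated between quantiles)
    ranks = np.argsort(np.argsort(src)) / (len(src) - 1)
    matched = np.interp(ranks, np.linspace(0, 1, len(ref)), np.sort(ref))
    print(round(matched.mean(), 1), round(matched.std(), 1))  # close to ref's moments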
The statistical techniques employed included the paired t-test, paired Wilcoxon signed-rank test, Bland-Altman analysis, and calculation of the intraclass correlation coefficient (ICC). Results All measurements showed excellent agreement between smartphone-based and Vectra M5-based measurements (ICC between 0.85 and 0.97). Statistical analysis revealed no statistically significant differences in the central tendencies for 17 of the 19 linear measurements. Despite the excellent agreement found, Bland-Altman analysis revealed that the 95\% limits of agreement between the two methods exceeded ±3 mm for the majority of measurements. Conclusion Digital facial anthropometry using smartphones can serve as a valuable supplementary tool for surgeons, enhancing their communication with patients. However, the proposed data suggest that digital facial anthropometry using smartphones may not yet be suitable for certain diagnostic purposes that require high accuracy.}, language = {en} } @article{EbigboMendelScheppachetal., author = {Ebigbo, Alanna and Mendel, Robert and Scheppach, Markus W. and Probst, Andreas and Shahidi, Neal and Prinz, Friederike and Fleischmann, Carola and R{\"o}mmele, Christoph and G{\"o}lder, Stefan Karl and Braun, Georg and Rauber, David and R{\"u}ckert, Tobias and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Byrne, Michael F. and Palm, Christoph and Messmann, Helmut}, title = {Vessel and tissue recognition during third-space endoscopy using a deep learning algorithm}, series = {Gut}, volume = {71}, journal = {Gut}, number = {12}, publisher = {BMJ}, address = {London}, doi = {10.1136/gutjnl-2021-326470}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-54293}, pages = {2388 -- 2390}, abstract = {In this study, we aimed to develop an artificial intelligence clinical decision support solution to mitigate operator-dependent limitations during complex endoscopic procedures such as endoscopic submucosal dissection and peroral endoscopic myotomy, for example, bleeding and perforation. A DeepLabv3-based model was trained to delineate vessels, tissue structures and instruments on endoscopic still images from such procedures. The mean cross-validated Intersection over Union and Dice Score were 63\% and 76\%, respectively. Applied to standardised video clips from third-space endoscopic procedures, the algorithm showed a mean vessel detection rate of 85\% with a false-positive rate of 0.75/min. These performance statistics suggest a potential clinical benefit for procedure safety, time and also training.}, language = {en} } @article{KnoedlerBaecherKaukeNavarroetal., author = {Kn{\"o}dler, Leonard and Baecher, Helena and Kauke-Navarro, Martin and Prantl, Lukas and Machens, Hans-G{\"u}nther and Scheuermann, Philipp and Palm, Christoph and Baumann, Raphael and Kehrer, Andreas and Panayi, Adriana C. and Knoedler, Samuel}, title = {Towards a Reliable and Rapid Automated Grading System in Facial Palsy Patients: Facial Palsy Surgery Meets Computer Science}, series = {Journal of Clinical Medicine}, volume = {11}, journal = {Journal of Clinical Medicine}, number = {17}, publisher = {MDPI}, address = {Basel}, doi = {10.3390/jcm11174998}, abstract = {Background: Reliable, time- and cost-effective, and clinician-friendly diagnostic tools are cornerstones in facial palsy (FP) patient management. Different automated FP grading systems have been developed but revealed persisting downsides such as insufficient accuracy and cost-intensive hardware. 
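As a quick reminder of the two overlap measures quoted in the Ebigbo et al. entry above, the toy sketch below computes Intersection over Union and Dice on invented binary masks:

    import numpy as np

    pred = np.array([[1, 1, 0], [0, 1, 0], [0, 0, 0]], dtype=bool)
    gold = np.array([[1, 0, 0], [0, 1, 1], [0, 0, 0]], dtype=bool)

    inter = np.logical_and(pred, gold).sum()
    union = np.logical_or(pred, gold).sum()
    iou = inter / union                              # 2 / 4 = 0.5
    dice = 2 * inter / (pred.sum() + gold.sum())     # 4 / 6, roughly 0.67
    print(iou, dice)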
We aimed to overcome these barriers and programmed an automated grading system for FP patients utilizing the House-Brackmann scale (HBS). Methods: Image datasets of 86 patients seen at the Department of Plastic, Hand, and Reconstructive Surgery at the University Hospital Regensburg, Germany, between June 2017 and May 2021, were used to train the neural network and evaluate its accuracy. Nine facial poses per patient were analyzed by the algorithm. Results: The algorithm showed an accuracy of 100\%. Oversampling did not result in altered outcomes, while the direct form displayed superior accuracy levels when compared to the modular classification form (n = 86; 100\% vs. 99\%). The Early Fusion technique was linked to improved accuracy outcomes in comparison to the Late Fusion and sequential method (n = 86; 100\% vs. 96\% vs. 97\%). Conclusions: Our automated FP grading system combines high-level accuracy with cost- and time-effectiveness. Our algorithm may accelerate the grading process in FP patients and facilitate the FP surgeon's workflow.}, language = {en} } @article{SouzaJrPalmMendeletal., author = {Souza Jr., Luis Antonio de and Palm, Christoph and Mendel, Robert and Hook, Christian and Ebigbo, Alanna and Probst, Andreas and Messmann, Helmut and Weber, Silke A. T. and Papa, Jo{\~a}o Paulo}, title = {A survey on Barrett's esophagus analysis using machine learning}, series = {Computers in Biology and Medicine}, volume = {96}, journal = {Computers in Biology and Medicine}, publisher = {Elsevier}, doi = {10.1016/j.compbiomed.2018.03.014}, pages = {203 -- 213}, abstract = {This work presents a systematic review concerning recent studies and technologies of machine learning for Barrett's esophagus (BE) diagnosis and treatment. The use of artificial intelligence is a brand new and promising way to evaluate this disease. We compile works published in well-established databases, such as Science Direct, IEEEXplore, PubMed, Plos One, Multidisciplinary Digital Publishing Institute (MDPI), Association for Computing Machinery (ACM), Springer, and Hindawi Publishing Corporation. Each selected work has been analyzed to present its objective, methodology, and results. The BE progression to dysplasia or adenocarcinoma shows a complex pattern to be detected during endoscopic surveillance. Therefore, it is valuable to assist its diagnosis and automatic identification using computer analysis. The evaluation of BE dysplasia can be performed through manual or automated segmentation using machine learning techniques. Finally, in this survey, we reviewed recent studies focused on the automatic detection of the neoplastic region for classification purposes using machine learning methods.}, subject = {Speiser{\"o}hrenkrankheit}, language = {en} } @article{OttPalmVogtetal., author = {Ott, Tankred and Palm, Christoph and Vogt, Robert and Oberprieler, Christoph}, title = {GinJinn: An object-detection pipeline for automated feature extraction from herbarium specimens}, series = {Applications in Plant Sciences}, volume = {8}, journal = {Applications in Plant Sciences}, number = {6}, publisher = {Wiley, Botanical Society of America}, issn = {2168-0450}, doi = {10.1002/aps3.11351}, pages = {e11351}, abstract = {PREMISE: The generation of morphological data in evolutionary, taxonomic, and ecological studies of plants using herbarium material has traditionally been a labor-intensive task.
Recent progress in machine learning using deep artificial neural networks (deep learning) for image classification and object detection has facilitated the establishment of a pipeline for the automatic recognition and extraction of relevant structures in images of herbarium specimens. METHODS AND RESULTS: We implemented an extendable pipeline based on state-of-the-art deep-learning object-detection methods to collect leaf images from herbarium specimens of two species of the genus Leucanthemum. Using 183 specimens as the training data set, our pipeline extracted one or more intact leaves in 95\% of the 61 test images. CONCLUSIONS: We establish GinJinn as a deep-learning object-detection tool for the automatic recognition and extraction of individual leaves or other structures from herbarium specimens. Our pipeline offers greater flexibility and a lower entry barrier than previous image-processing approaches based on hand-crafted features.}, subject = {Deep Learning}, language = {en} } @article{EbigboPalmProbstetal., author = {Ebigbo, Alanna and Palm, Christoph and Probst, Andreas and Mendel, Robert and Manzeneder, Johannes and Prinz, Friederike and Souza Jr., Luis Antonio de and Papa, Jo{\~a}o Paulo and Siersema, Peter and Messmann, Helmut}, title = {A technical review of artificial intelligence as applied to gastrointestinal endoscopy: clarifying the terminology}, series = {Endoscopy International Open}, volume = {07}, journal = {Endoscopy International Open}, number = {12}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-1010-5705}, pages = {1616 -- 1623}, abstract = {The growing number of publications on the application of artificial intelligence (AI) in medicine underlines the enormous importance and potential of this emerging field of research. In gastrointestinal endoscopy, AI has been applied to all segments of the gastrointestinal tract, most importantly in the detection and characterization of colorectal polyps. However, AI research has also been published on the stomach and esophagus for both neoplastic and non-neoplastic disorders. The various technical as well as medical aspects of AI, however, remain confusing, especially for non-expert physicians. This physician-engineer co-authored review explains the basic technical aspects of AI and provides a comprehensive overview of recent publications on AI in gastrointestinal endoscopy. Finally, a basic insight is offered into understanding publications on AI in gastrointestinal endoscopy.}, subject = {Diagnose}, language = {en} } @inproceedings{WoehlHuberLoibletal., author = {W{\"o}hl, Rebecca and Huber, Michaela and Loibl, Markus and Riebschl{\"a}ger, Birgit and Nerlich, Michael and Palm, Christoph}, title = {The Impact of Semi-Automated Segmentation and 3D Analysis on Testing New Osteosynthesis Material}, series = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2017; Algorithmen - Systeme - Anwendungen; Proceedings des Workshops vom 12. bis 14. M{\"a}rz 2017 in Heidelberg}, publisher = {Springer}, address = {Berlin}, doi = {10.1007/978-3-662-54345-0_30}, pages = {122 -- 127}, abstract = {A new protocol for testing osteosynthesis material postoperatively, combining semi-automated segmentation and 3D analysis of surface meshes, is proposed. By various steps of transformation and measuring, objective data can be collected.
In this study, the specifications of a locking plate used for mediocarpal arthrodesis of the wrist were examined. The results show that union of the lunate, triquetrum, hamate and capitate was achieved and that the plate is comparable to coexisting arthrodesis systems. Additionally, it was shown that the complications detected correlate with the clinical outcome. In summary, this protocol is considered beneficial and should be taken into account in further studies.}, subject = {Osteosynthese}, language = {en} } @inproceedings{Palm, author = {Palm, Christoph}, title = {Fusion of Serial 2D Section Images and MRI Reference}, series = {Workshop Innovative Verarbeitung bioelektrischer und biomagnetischer Signale (bbs2014), Berlin, 10.04.2014}, booktitle = {Workshop Innovative Verarbeitung bioelektrischer und biomagnetischer Signale (bbs2014), Berlin, 10.04.2014}, doi = {10.13140/RG.2.1.1358.3449}, abstract = {Serial 2D section images with high resolution resulting from innovative imaging methods become even more valuable if they are fused with in vivo volumes. By achieving this goal, the 3D context of the sections would be restored, the deformations would be corrected and the artefacts would be eliminated. However, the registration in this field faces major challenges and is not solved in general. On the other hand, several approaches have been introduced dealing at least with some of these difficulties. Here, a brief overview of the topic is given and some of the solutions are presented. It does not claim to be a complete review, but may serve as a starting point for those who are interested in this field.}, subject = {Kernspintomografie}, language = {en} } @article{MeinikheimMendelPalmetal., author = {Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\´a}ndez-Esparrach, Gl{\`o}ria and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Influence of artificial intelligence on the diagnostic performance of endoscopists in the assessment of Barrett's esophagus: a tandem randomized and video trial}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, publisher = {Georg Thieme Verlag}, address = {Stuttgart}, doi = {10.1055/a-2296-5696}, url = {http://nbn-resolving.de/urn:nbn:de:bvb:898-opus4-72818}, pages = {641 -- 649}, abstract = {Background This study evaluated the effect of an artificial intelligence (AI)-based clinical decision support system on the performance and diagnostic confidence of endoscopists in their assessment of Barrett's esophagus (BE). Methods 96 standardized endoscopy videos were assessed by 22 endoscopists with varying degrees of BE experience from 12 centers. Assessment was randomized into two video sets: group A (review first without AI and second with AI) and group B (review first with AI and second without AI). Endoscopists were required to evaluate each video for the presence of Barrett's esophagus-related neoplasia (BERN) and then decide on a spot for a targeted biopsy. After the second assessment, they were allowed to change their clinical decision and confidence level. Results AI had a stand-alone sensitivity, specificity, and accuracy of 92.2\%, 68.9\%, and 81.3\%, respectively.
Without AI, BE experts had an overall sensitivity, specificity, and accuracy of 83.3\%, 58.1\%, and 71.5\%, respectively. With AI, BE nonexperts showed a significant improvement in sensitivity and specificity when videos were assessed a second time with AI (sensitivity 69.8\% [95\%CI 65.2\%-74.2\%] to 78.0\% [95\%CI 74.0\%-82.0\%]; specificity 67.3\% [95\%CI 62.5\%-72.2\%] to 72.7\% [95\%CI 68.2\%-77.3\%]). In addition, the diagnostic confidence of BE nonexperts improved significantly with AI. Conclusion BE nonexperts benefitted significantly from additional AI. BE experts and nonexperts remained significantly below the stand-alone performance of AI, suggesting that there may be other factors influencing endoscopists' decisions to follow or discard AI advice.}, language = {en} } @misc{EbigboRauberAyoubetal., author = {Ebigbo, Alanna and Rauber, David and Ayoub, Mousa and Birzle, Lisa and Matsumura, Tomoaki and Probst, Andreas and Steinbr{\"u}ck, Ingo and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Scheppach, Markus W. and Palm, Christoph and Messmann, Helmut}, title = {Early Esophageal Cancer and the Generalizability of Artificial Intelligence}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783775}, pages = {S428}, abstract = {Aims Artificial Intelligence (AI) systems in gastrointestinal endoscopy are narrow because they are trained to solve only one specific task. Unlike Narrow-AI, general AI systems may be able to solve multiple and unrelated tasks. We aimed to understand whether an AI system trained to detect, characterize, and segment early Barrett's neoplasia (Barrett's AI) is only capable of detecting this pathology or can also detect and segment other diseases like early squamous cell cancer (SCC). Methods 120 white light (WL) and narrow-band endoscopic images (NBI) from 60 patients (1 WL and 1 NBI image per patient) were extracted from the endoscopic database of the University Hospital Augsburg. Images were annotated by three expert endoscopists with extensive experience in the diagnosis and endoscopic resection of early esophageal neoplasias. An AI system based on the DeepLabV3+ architecture dedicated to early Barrett's neoplasia was tested on these images. The AI system was neither trained with SCC images nor had it seen the test images prior to evaluation. The overlap between the three expert annotations ("expert agreement") was the ground truth for evaluating AI performance. Results Barrett's AI detected early SCC with a mean intersection over reference (IoR) of 92\% when at least 1 pixel of the AI prediction overlapped with the expert agreement. When the threshold was increased to 5\%, 10\%, and 20\% overlap with the expert agreement, the IoR was 88\%, 85\% and 82\%, respectively. The mean Intersection Over Union (IoU) - a metric of segmentation quality between the AI prediction and the expert agreement - was 0.45. The mean expert IoU as a measure of agreement between the three experts was 0.60. Conclusions In the context of this pilot study, the predictions of SCC by a Barrett's dedicated AI showed some overlap with the expert agreement. Therefore, features learned from Barrett's cancer-related training might also be helpful for SCC prediction. Our results allow for different possible explanations. On the one hand, some Barrett's cancer features generalize toward the related task of assessing early SCC.
On the other hand, the Barrett's AI may be less a Barrett's cancer-specific detector than a general predictor of pathological tissue. However, we expect to enhance the detection quality significantly by extending the training to SCC-specific data. The insights of this study open the way towards a transfer learning approach for more efficient training of AI to solve tasks in other domains.}, language = {en} } @misc{ScheppachMendelRauberetal., author = {Scheppach, Markus W. and Mendel, Robert and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Artificial Intelligence (AI) improves endoscopists' vessel detection during endoscopic submucosal dissection (ESD)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1782891}, pages = {S93}, abstract = {Aims While AI has been successfully implemented in detecting and characterizing colonic polyps, its role in therapeutic endoscopy remains to be elucidated. Third-space endoscopy procedures in particular, such as ESD and peroral endoscopic myotomy (POEM), pose a technical challenge and carry the risk of operator-dependent complications like intraprocedural bleeding and perforation. Therefore, we aimed to develop an AI algorithm for intraprocedural real-time vessel detection during ESD and POEM. Methods A training dataset consisting of 5470 annotated still images from 59 full-length videos (47 ESD, 12 POEM) and 179,681 unlabeled images was used to train a DeepLabV3+ neural network with the ECMT semi-supervised learning method. Evaluation for vessel detection rate (VDR) and time (VDT) of 19 endoscopists with and without AI support was performed using a testing dataset of 101 standardized video clips with 200 predefined blood vessels. Endoscopists were stratified into trainees and experts in third-space endoscopy. Results The AI algorithm had a mean VDR of 93.5\% and a median VDT of 0.32 seconds. AI support was associated with a statistically significant increase in VDR from 54.9\% to 73.0\% and from 59.0\% to 74.1\% for trainees and experts, respectively. VDT significantly decreased from 7.21 sec to 5.09 sec for trainees and from 6.10 sec to 5.38 sec for experts in the AI-support group. False positive (FP) readings occurred in 4.5\% of frames. FP structures were detected for a significantly shorter time than true positives (0.71 sec vs. 5.99 sec). Conclusions AI improved VDR and VDT of trainees and experts in third-space endoscopy and may reduce performance variability during training. Further research is needed to evaluate the clinical impact of this new technology.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and R{\"o}mmele, Christoph and Nagl, Sandra and Schnoy, Elisabeth and Messmann, Helmut and Ebigbo, Alanna and Palm, Christoph}, title = {Artificial intelligence as a tool in the detection of the papillary ostium during ERCP}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783138}, pages = {S198}, abstract = {Aims Endoscopic retrograde cholangiopancreatography (ERCP) is the gold standard in the diagnosis as well as the treatment of diseases of the pancreatobiliary tract. However, it is technically complex and has a relatively high complication rate.
In particular, cannulation of the papillary ostium remains challenging. The aim of this study is to examine whether a deep-learning algorithm can be used to detect the major duodenal papilla and, in particular, the papillary ostium reliably, and could therefore be a valuable tool for inexperienced endoscopists, particularly in training situations. Methods We analyzed a total of 654 retrospectively collected images of 85 patients. Both the major duodenal papilla and the ostium were then segmented. Afterwards, a neural network was trained using a deep-learning algorithm. A 5-fold cross-validation was performed. Subsequently, we ran the algorithm on 5 prospectively collected videos of ERCPs. Results 5-fold cross-validation on the 654 labeled images resulted in an F1 value of 0.8007, a sensitivity of 0.8409 and a specificity of 0.9757 for the class papilla, and an F1 value of 0.5724, a sensitivity of 0.5456 and a specificity of 0.9966 for the class ostium. Across both classes (papilla and ostium), the average F1 value was 0.6866, the sensitivity 0.6933 and the specificity 0.9861. In 100\% of cases, the AI-detected localization of the papillary ostium in the prospectively collected videos corresponded to the localization of the cannulation performed by the endoscopist. Conclusions In the present study, the neural network was able to identify the major duodenal papilla with a high sensitivity and high specificity. In detecting the papillary ostium, the sensitivity was notably lower. However, when used on videos, the AI was able to identify the location of the subsequent cannulation with 100\% accuracy. In the future, the neural network will be trained with more data. Thus, a suitable tool for ERCP could be established, especially for training situations.}, language = {en} } @misc{ScheppachNunesArizietal., author = {Scheppach, Markus W. and Nunes, Danilo Weber and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Meinikheim, Michael and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Procedural phase recognition in endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0044-1783804}, pages = {S439}, abstract = {Aims Recent evidence suggests the possibility of intraprocedural phase recognition in surgical operations as well as endoscopic interventions such as peroral endoscopic myotomy and endoscopic submucosal dissection (ESD) by AI algorithms. The intricate measurement of intraprocedural phase distribution may deepen the understanding of the procedure. Furthermore, real-time quality assessment as well as automation of reporting may become possible. Therefore, we aimed to develop an AI algorithm for intraprocedural phase recognition during ESD. Methods A training dataset of 364,385 single images from 9 full-length ESD videos was compiled. Each frame was classified into one procedural phase. Phases included scope manipulation, marking, injection, application of electrical current and bleeding. Each frame could be allocated to only one category. This training dataset was used to train a Video Swin transformer to recognize the phases. Temporal information was included via logarithmic frame sampling. Validation was performed using two separate ESD videos with 29,801 single frames.
Results The validation yielded sensitivities of 97.81\%, 97.83\%, 95.53\%, 85.01\% and 87.55\% for scope manipulation, marking, injection, electric application and bleeding, respectively. Specificities of 77.78\%, 90.91\%, 95.91\%, 93.65\% and 84.76\% were measured for the same phases. Conclusions The developed algorithm was able to classify full-length ESD videos on a frame-by-frame basis into the predefined classes with high sensitivities and specificities. Future research will aim at the development of quality metrics based on single-operator phase distribution.}, language = {en} } @misc{ScheppachRauberStallhoferetal., author = {Scheppach, Markus W. and Rauber, David and Stallhofer, Johannes and Muzalyova, Anna and Otten, Vera and Manzeneder, Carolin and Schwamberger, Tanja and Wanzl, Julia and Schlottmann, Jakob and Tadic, Vidan and Probst, Andreas and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Fleischmann, Carola and Meinikheim, Michael and Miller, Silvia and M{\"a}rkl, Bruno and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Performance comparison of a deep learning algorithm with endoscopists in the detection of duodenal villous atrophy (VA)}, series = {Endoscopy}, volume = {55}, journal = {Endoscopy}, number = {S02}, publisher = {Thieme}, doi = {10.1055/s-0043-1765421}, pages = {S165}, abstract = {Aims VA is an endoscopic finding of celiac disease (CD), which can easily be missed if pretest probability is low. In this study, we aimed to develop an artificial intelligence (AI) algorithm for the detection of villous atrophy on endoscopic images. Methods 858 images from 182 patients with VA and 846 images from 323 patients with normal duodenal mucosa were used for training and internal validation of an AI algorithm (ResNet18). A separate dataset was used for external validation, as well as for the determination of the detection performance of experts, trainees and trainees with AI support. According to the AI consultation distribution, images were stratified into "easy" and "difficult". Results Internal validation showed 82\%, 85\% and 84\% for sensitivity, specificity and accuracy. External validation showed 90\%, 76\% and 84\%. The algorithm was significantly more sensitive and accurate than trainees, trainees with AI support and experts in endoscopy. AI support in trainees was associated with significantly improved performance. While all endoscopists showed significantly lower detection rates for "difficult" images, AI performance remained stable. Conclusions The algorithm outperformed trainees and experts in sensitivity and accuracy for VA detection. The significant improvement with AI support suggests a potential clinical benefit. Stable performance of the algorithm in "easy" and "difficult" test images may indicate an advantage in macroscopically challenging cases.}, language = {en} } @misc{ScheppachWeberNunesArizietal., author = {Scheppach, Markus W. and Weber Nunes, Danilo and Arizi, X. and Rauber, David and Probst, Andreas and Nagl, Sandra and R{\"o}mmele, Christoph and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {Single frame workflow recognition during endoscopic submucosal dissection (ESD) using artificial intelligence (AI)}, series = {Endoscopy}, volume = {57}, journal = {Endoscopy}, number = {S 02}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806324}, pages = {S511}, abstract = {Aims Precise surgical phase recognition and evaluation may improve our understanding of complex endoscopic procedures.
Furthermore, quality control measurements and endoscopy training could benefit from objective descriptions of surgical phase distributions. Therefore, we aimed to develop an artificial intelligence algorithm for frame-by-frame operational phase recognition during endoscopic submucosal dissection (ESD). Methods Full-length ESD videos from 31 patients comprising 6,297,782 single images were collected retrospectively. Videos were annotated on a frame-by-frame basis for the operational macro-phases diagnostics, marking, injection, dissection and bleeding. Further subphases were the application of electrical current, visible injection of fluid into the submucosal space and scope manipulation, leading to 11 phases in total. 4,975,699 frames (21 patients) were used for training of a Video Swin transformer using uniform frame sampling for temporal information. Hyperparameter tuning was performed with 897,325 further frames (6 patients), while 424,758 frames (4 patients) were used for validation. Results The overall F1 scores on the test dataset for the macro-phases and all 11 phases were 0.96 and 0.90, respectively. The recall values for diagnostics, marking, injection, dissection and bleeding were 1.00, 1.00, 0.95, 0.96 and 0.93, respectively. Conclusions The algorithm classified operational phases during ESD with high accuracy. A precise evaluation of phase distribution may allow for the development of objective quality metrics for quality control and training.}, language = {en} } @article{WeigertPietrzykMuelleretal., author = {Weigert, Markus and Pietrzyk, Uwe and M{\"u}ller, Stefan P. and Palm, Christoph and Beyer, Thomas}, title = {Whole-body PET/CT imaging}, series = {Zeitschrift f{\"u}r Medizinische Physik}, volume = {18}, journal = {Zeitschrift f{\"u}r Medizinische Physik}, number = {1}, doi = {10.1016/j.zemedi.2007.07.004}, pages = {59 -- 66}, abstract = {Aim Combined whole-body (WB) PET/CT imaging provides better overall co-registration compared to separate CT and PET. However, in clinical routine, local PET-CT mis-registration cannot be avoided. Thus, the reconstructed PET tracer distribution may be biased when using the misaligned CT transmission data for CT-based attenuation correction (CT-AC). We investigate the feasibility of retrospective co-registration techniques to align CT and PET images prior to CT-AC, thus potentially improving the quality of combined PET/CT imaging in clinical routine. Methods First, using a commercial software registration package, CT images were aligned to the uncorrected PET data by rigid and non-rigid registration methods. Co-registration accuracy of both alignment approaches was assessed by reviewing the PET tracer uptake patterns (visual, linked cursor display) following attenuation correction based on the original and co-registered CT. Second, we investigated non-rigid registration based on a prototype ITK implementation of the B-spline algorithm on a similarly targeted MR-CT registration task, where it showed promising results. Results Manual rigid, landmark-based co-registration introduced unacceptable misalignment, in particular in peripheral areas of the whole-body images. Manual, non-rigid landmark-based co-registration prior to CT-AC was successful with minor loco-regional distortions. Nevertheless, neither rigid nor non-rigid automatic co-registration based on the Mutual Information image-to-image metric succeeded in co-registering the CT and noAC-PET images.
In contrast to widely available commercial registration software, our implementation of an alternative automated, non-rigid B-spline co-registration technique yielded promising results in this setting with MR-CT data. Conclusion In clinical PET/CT imaging, retrospective registration of CT and uncorrected PET images may improve the quality of the AC-PET images. As of today, no validated and clinically viable commercial registration software is in routine use. This has triggered our efforts in pursuing new approaches to a validated, non-rigid co-registration algorithm applicable to whole-body PET/CT imaging, of which first results are presented here. This approach appears suitable for applications in retrospective WB-PET/CT alignment. Ziel Kombinierte PET/CT-Bildgebung erm{\"o}glicht verbesserte Koregistrierung von PET- und CT-Daten gegen{\"u}ber separat akquirierten Bildern. Trotzdem entstehen in der klinischen Anwendung lokale Fehlregistrierungen, die zu Fehlern in der rekonstruierten PET-Tracerverteilung f{\"u}hren k{\"o}nnen, falls die unregistrierten CT-Daten zur Schw{\"a}chungskorrektur (AC) der Emissionsdaten verwendet werden. Wir untersuchen daher die Anwendung von Bildregistrierungsalgorithmen vor der CT-basierten AC zur Verbesserung der PET-Aufnahmen. Methoden Mittels einer kommerziellen Registrierungssoftware wurden die CT-Daten eines PET/CT-Tomographen durch landmarken- und intensit{\"a}tsbasierte rigide (starre) und nicht-rigide Registrierungsverfahren r{\"a}umlich an die unkorrigierten PET-Emissionsdaten angepasst und zur AC verwendet. Zur Bewertung wurden die Tracerverteilungen in den PET-Bildern (vor AC, CT-AC, CT-AC nach Koregistrierung) visuell und mit Hilfe korrelierter Fadenkreuze verglichen. Zus{\"a}tzlich untersuchten wir die ITK-Implementierung der bekannten B-spline basierten, nicht-rigiden Registrierungsans{\"a}tze im Hinblick auf ihre Verwendbarkeit f{\"u}r die multimodale PET/CT-Ganzk{\"o}rperregistrierung. Ergebnisse Mittels landmarkenbasierter, nicht-rigider Registrierung konnte die Tracerverteilung in den PET-Daten lokal verbessert werden. Landmarkenbasierte rigide Registrierung f{\"u}hrte zu starker Fehlregistrierung in entfernten K{\"o}rperregionen. Automatische rigide und nicht-rigide Registrierung unter Verwendung der Mutual-Information-{\"A}hnlichkeitsmetrik versagte auf allen verwendeten Datens{\"a}tzen. Die automatische Registrierung mit B-spline-Funktionen zeigte vielversprechende Resultate in der Anwendung auf einem {\"a}hnlich gelagerten CT-MR-Registrierungsproblem. Fazit Retrospektive, nicht-rigide Registrierung unkorrigierter PET- und CT-Aufnahmen aus kombinierten Aufnahmesystemen vor der AC kann die Qualit{\"a}t von PET-Aufnahmen im klinischen Einsatz verbessern. Trotzdem steht bis heute im klinischen Alltag keine validierte, automatische Registrierungssoftware zur Verf{\"u}gung. Wir verfolgen dazu Ans{\"a}tze f{\"u}r validierte, nicht-rigide Bildregistrierung f{\"u}r den klinischen Einsatz und pr{\"a}sentieren erste Ergebnisse.}, subject = {Positronen-Emissions-Tomografie}, language = {en} } @misc{RoserMeinikheimMendeletal., author = {Roser, David and Meinikheim, Michael and Mendel, Robert and Palm, Christoph and Probst, Andreas and Muzalyova, Anna and Scheppach, Markus W. and Nagl, Sandra and Schnoy, Elisabeth and R{\"o}mmele, Christoph and Schulz, Dominik Andreas Helmut Otto and Schlottmann, Jakob and Prinz, Friederike and Rauber, David and R{\"u}ckert, Tobias and Matsumura, Tomoaki and Fern{\´a}ndez-Esparrach, Gl{\`o}ria
and Parsa, Nasim and Byrne, Michael F. and Messmann, Helmut and Ebigbo, Alanna}, title = {Human-Computer Interaction: Impact of Artificial Intelligence on the diagnostic confidence of endoscopists assessing videos of Barrett's esophagus}, series = {Endoscopy}, volume = {56}, journal = {Endoscopy}, number = {S 02}, publisher = {Georg Thieme Verlag}, issn = {1438-8812}, doi = {10.1055/s-0044-1782859}, pages = {79}, abstract = {Aims Human-computer interactions (HCI) may have a relevant impact on the performance of Artificial Intelligence (AI). Studies show that although endoscopists assessing Barrett's esophagus (BE) with AI improve their performance significantly, they do not achieve the level of the stand-alone performance of AI. One aspect of HCI is the impact of AI on the degree of certainty and confidence displayed by the endoscopist. Indirectly, diagnostic confidence when using AI may be linked to trust and acceptance of AI. In a BE video study, we aimed to understand the impact of AI on the diagnostic confidence of endoscopists and the possible correlation with diagnostic performance. Methods 22 endoscopists from 12 centers with varying levels of BE experience reviewed 96 standardized endoscopy videos. Endoscopists were categorized into experts and non-experts and randomly assigned to assess the videos with and without AI. Participants were randomized into two arms: Arm A assessed videos first without AI and then with AI, while Arm B assessed videos in the opposite order. Evaluators were tasked with identifying BE-related neoplasia and rating their confidence with and without AI on a scale from 0 to 9. Results The utilization of AI in Arm A (without AI first, with AI second) significantly elevated confidence levels for experts and non-experts (7.1 to 8.0 and 6.1 to 6.6, respectively). Only non-experts benefitted from AI with a significant increase in accuracy (68.6\% to 75.5\%). Interestingly, while the confidence levels of experts without AI were higher than those of non-experts with AI, there was no significant difference in accuracy between these two groups (71.3\% vs. 75.5\%). In Arm B (with AI first, without AI second), experts and non-experts experienced a significant reduction in confidence (7.6 to 7.1 and 6.4 to 6.2, respectively), while maintaining consistent accuracy levels (71.8\% to 71.8\% and 67.5\% to 67.1\%, respectively). Conclusions AI significantly enhanced confidence levels for both expert and non-expert endoscopists. Endoscopists felt significantly more uncertain in their assessments without AI. Furthermore, experts with or without AI consistently displayed higher confidence levels than non-experts with AI, despite comparable outcomes.
These findings underscore the possible role of AI in improving diagnostic confidence during endoscopic assessment.}, language = {en} } @misc{ZellmerRauberProbstetal., author = {Zellmer, Stephan and Rauber, David and Probst, Andreas and Weber, Tobias and Braun, Georg and Nagl, Sandra and R{\"o}mmele, Christoph and Schnoy, Elisabeth and Birzle, Lisa and Aehling, Niklas and Schulz, Dominik Andreas Helmut Otto and Palm, Christoph and Messmann, Helmut and Ebigbo, Alanna}, title = {K{\"u}nstliche Intelligenz als Hilfsmittel zur Detektion der Papilla duodeni major und des papill{\"a}ren Ostiums w{\"a}hrend der ERCP}, series = {Zeitschrift f{\"u}r Gastroenterologie}, volume = {63}, journal = {Zeitschrift f{\"u}r Gastroenterologie}, number = {5}, publisher = {Thieme}, address = {Stuttgart}, doi = {10.1055/s-0045-1806882}, pages = {e295}, abstract = {Einleitung Die Endoskopische Retrograde Cholangiopankreatikographie (ERCP) ist der Goldstandard in der endoskopischen Therapie von Erkrankungen des pankreatobili{\"a}ren Trakts. Allerdings ist sie technisch anspruchsvoll, schwer zu erlernen und mit einer relativ hohen Komplikationsrate assoziiert. Daher soll in der vorliegenden Machbarkeitsstudie gepr{\"u}ft werden, ob mithilfe eines Deep-Learning-Algorithmus die Papille und das Ostium zuverl{\"a}ssig detektiert werden k{\"o}nnen und dieser f{\"u}r Endoskopiker, insbesondere in der Ausbildungssituation, ein geeignetes Hilfsmittel darstellen k{\"o}nnte. Material und Methodik Insgesamt wurden 1534 ERCP-Bilder von 134 Patienten analysiert, wobei sowohl die Papilla duodeni major als auch das Ostium segmentiert wurden. Anschließend erfolgte das Training eines neuronalen Netzes unter Verwendung eines Deep-Learning-Algorithmus. F{\"u}r den Test des Algorithmus erfolgte eine f{\"u}nffache Kreuzvalidierung. Ergebnisse Auf den 1534 gelabelten Bildern wurden f{\"u}r die Klasse Papille ein F1-Wert von 0,7996, eine Sensitivit{\"a}t von 0,8488 und eine Spezifit{\"a}t von 0,9822 erzielt. F{\"u}r die Klasse Ostium ergaben sich ein F1-Wert von 0,5198, eine Sensitivit{\"a}t von 0,5945 und eine Spezifit{\"a}t von 0,9974. Klassen{\"u}bergreifend (Klasse Papille und Klasse Ostium) betrug der F1-Wert 0,6593, die Sensitivit{\"a}t 0,7216 und die Spezifit{\"a}t 0,9898. Zusammenfassung In der vorliegenden Machbarkeitsstudie zeigte das neuronale Netz eine hohe Sensitivit{\"a}t und eine sehr hohe Spezifit{\"a}t bei der Identifikation der Papilla duodeni major. Die Detektion des Ostiums erfolgte hingegen mit einer deutlich geringeren Sensitivit{\"a}t. Zuk{\"u}nftig ist eine Erweiterung des Trainingsdatensatzes um Videos und klinische Daten vorgesehen, um die Leistungsf{\"a}higkeit des Netzwerks zu verbessern. Hierdurch k{\"o}nnte langfristig ein geeignetes Assistenzsystem f{\"u}r die ERCP, insbesondere in der Ausbildungssituation, etabliert werden.}, language = {de} } @article{DehnhardtPalmVietenetal., author = {Dehnhardt, Markus and Palm, Christoph and Vieten, Andrea and Bauer, Andreas and Pietrzyk, Uwe}, title = {Quantifying the A1AR distribution in peritumoral zones around experimental F98 and C6 rat brain tumours}, series = {Journal of Neuro-Oncology}, volume = {85}, journal = {Journal of Neuro-Oncology}, doi = {10.1007/s11060-007-9391-6}, pages = {49 -- 63}, abstract = {Quantification of growth in experimental F98 and C6 rat brain tumours was performed on 51 rat brains, 17 of which were further assessed by 3D tumour reconstruction.
Brains were cryosliced and radio-labelled with a ligand of the peripheral-type benzodiazepine receptor (pBR), 3H-Pk11195 [(1-(2-chlorophenyl)-N-methyl-N-(1-methyl-propylene)-3-isoquinoline-carboxamide)] by receptor autoradiography. Manually segmented and automatically registered tumours were 3D-reconstructed for volumetric comparison on the basis of 3H-Pk11195-based tumour recognition. Furthermore, automatically computed areas of a -300 μm inner (marginal) zone as well as 300 μm and 600 μm outer tumour space were quantified. These three different regions were transferred onto other adjacent slices that had been labelled by receptor autoradiography with the A1 Adenosine receptor (A1AR)-ligand 3H-CPFPX (3H-8-cyclopentyl-3-(3-fluorpropyl)-1-propylxanthine) for quantitative assessment of A1AR in the three different tumour zones. Hence, a method is described for quantifying various receptor protein systems in the tumour as well as in the marginal invasive zones around experimentally implanted rat brain tumours and their representation in the tumour microenvironment as well as in 3D space. Furthermore, a tool was developed for automatically reading out radio-labelled rat brain slices from autoradiographic films; the slices were reconstructed into a consistent 3D tumour model, and the zones around the tumour were visualized. A1AR expression was found to depend upon the tumour volume in C6 animals, but to be independent of the time of tumour development. In F98 animals, a significant increase in A1AR receptor protein was found in the peritumoural zone as a function of time of tumour development and tumour volume.}, subject = {Hirntumor}, language = {en} } @article{MangSchnabelCrumetal., author = {Mang, Andreas and Schnabel, Julia A. and Crum, William R. and Modat, Marc and Camara-Rey, Oscar and Palm, Christoph and Caseiras, Gisele Brasil and J{\"a}ger, H. Rolf and Ourselin, S{\´e}bastien and Buzug, Thorsten M. and Hawkes, David J.}, title = {Consistency of parametric registration in serial MRI studies of brain tumor progression}, series = {International Journal of Computer Assisted Radiology and Surgery}, volume = {3}, journal = {International Journal of Computer Assisted Radiology and Surgery}, number = {3-4}, doi = {10.1007/s11548-008-0234-5}, pages = {201 -- 211}, abstract = {Object The consistency of parametric registration in multi-temporal magnetic resonance (MR) imaging studies was evaluated. Materials and methods Serial MRI scans of adult patients with a brain tumor (glioma) were aligned by parametric registration. The performance of low-order spatial alignment (6/9/12 degrees of freedom) of different 3D serial MR-weighted images is evaluated. A registration protocol for the alignment of all images to one reference coordinate system at baseline is presented. Registration results were evaluated for both multimodal intra-timepoint and mono-modal multi-temporal registration. The latter case might present a challenge to automatic intensity-based registration algorithms due to ill-defined correspondences. The performance of our algorithm was assessed by testing the inverse registration consistency. Four different similarity measures were evaluated to assess consistency. Results Careful visual inspection suggests that images are well aligned, but their consistency may be imperfect. Sub-voxel inconsistency within the brain was found for all similarity measures used for parametric multi-temporal registration.
T1-weighted images were most reliable for establishing spatial correspondence between different timepoints. Conclusions The parametric registration algorithm is feasible for use in this application. The sub-voxel resolution mean displacement error of registration transformations demonstrates that the algorithm converges to an almost identical solution for forward and reverse registration.}, subject = {Kernspintomografie}, language = {en} } @inproceedings{PalmGraemeCrumetal., author = {Palm, Christoph and Graeme, Penny P. and Crum, William R. and Schnabel, Julia A. and Pietrzyk, Uwe and Hawkes, David J.}, title = {Fusion of Rat Brain Histology and MRI using Weighted Multi-Image Mutual Information}, series = {Proceedings of the SPIE Medical Imaging 6914: Image Processing 69140M}, booktitle = {Proceedings of the SPIE Medical Imaging 6914: Image Processing 69140M}, number = {6914}, doi = {10.1117/12.770605}, pages = {69140M-1 -- 69140M-9}, abstract = {Fusion of histology and MRI is frequently demanded in biomedical research to study in vitro tissue properties in an in vivo reference space. Distortions and artifacts caused by cutting and staining of histological slices as well as differences in spatial resolution make even the rigid fusion a difficult task. State-of-the-art methods start with a mono-modal restacking yielding a histological pseudo-3D volume. The 3D information of the MRI reference is considered subsequently. However, consistency of the histology volume and consistency with the corresponding MRI seem to be diametrically opposed goals. Therefore, we propose a novel fusion framework optimizing histology/histology and histology/MRI consistency at the same time, finding a balance between both goals. Method - Direct slice-to-slice correspondence even in irregularly spaced cutting sequences is achieved by registration-based interpolation of the MRI. Introducing a weighted multi-image mutual information metric (WI), adjacent histology and corresponding MRI are taken into account at the same time. Therefore, the reconstruction of the histological volume as well as the fusion with the MRI is done in a single step. Results - Based on two data sets with more than 110 single registrations in all, the results are evaluated quantitatively based on Tanimoto overlap measures and qualitatively showing the fused volumes. In comparison to other multi-image metrics, the reconstruction based on WI is significantly improved. We evaluated different parameter settings with emphasis on the weighting term steering the balance between intra- and inter-modality consistency.}, subject = {Kernspintomografie}, language = {en} } @article{DesernoHandelsMaierHeinetal., author = {Deserno, Thomas M. and Handels, Heinz and Maier-Hein, Klaus H. and Mersmann, Sven and Palm, Christoph and Tolxdorff, Thomas and Wagenknecht, Gudrun and Wittenberg, Thomas}, title = {Viewpoints on Medical Image Processing}, series = {Current Medical Imaging Reviews}, volume = {9}, journal = {Current Medical Imaging Reviews}, number = {2}, doi = {10.2174/1573405611309020002}, pages = {79 -- 88}, abstract = {Medical image processing provides core innovation for medical imaging. This paper is focused on recent developments from science to applications, analyzing the past fifteen years of history of the proceedings of the German annual meeting on medical image processing (BVM).
Furthermore, some members of the program committee present their personal points of view: (i) multi-modality for imaging and diagnosis, (ii) analysis of diffusion-weighted imaging, (iii) model-based image analysis, (iv) registration of section images, (v) from images to information in digital endoscopy, and (vi) virtual reality and robotics. Medical imaging and medical image computing are seen as a field of rapid development with clear trends towards integrated applications in diagnostics, treatment planning and treatment.}, subject = {Bildgebendes Verfahren}, language = {en} } @inproceedings{EibenKunzPietrzyketal., author = {Eiben, Bj{\"o}rn and Kunz, Dietmar and Pietrzyk, Uwe and Palm, Christoph}, title = {Level-Set-Segmentierung von Rattenhirn MRTs}, series = {Bildverarbeitung f{\"u}r die Medizin 2009; Algorithmen - Systeme - Anwendungen ; Proceedings des Workshops vom 22. bis 25. M{\"a}rz 2009 in Heidelberg}, booktitle = {Bildverarbeitung f{\"u}r die Medizin 2009; Algorithmen - Systeme - Anwendungen ; Proceedings des Workshops vom 22. bis 25. M{\"a}rz 2009 in Heidelberg}, publisher = {Springer}, address = {Berlin}, pages = {167 -- 171}, abstract = {In dieser Arbeit wird die Segmentierung von Gehirngewebe aus Kopfaufnahmen von Ratten mittels Level-Set-Methoden vorgeschlagen. Dazu wird ein zweidimensionaler, kontrastbasierter Ansatz zu einem dreidimensionalen, lokal an die Bildintensit{\"a}t adaptierten Segmentierer erweitert. Es wird gezeigt, dass mit diesem echten 3D-Ansatz die lokalen Bildstrukturen besser ber{\"u}cksichtigt werden k{\"o}nnen. Insbesondere Magnet-Resonanz-Tomographien (MRTs) mit globalen Helligkeitsgradienten, beispielsweise bedingt durch Oberfl{\"a}chenspulen, k{\"o}nnen auf diese Weise zuverl{\"a}ssiger und ohne weitere Vorverarbeitungsschritte segmentiert werden. Die Leistungsf{\"a}higkeit des Algorithmus wird experimentell an Hand dreier Rattenhirn-MRTs demonstriert.}, subject = {Dreidimensionale Bildverarbeitung}, language = {de} } @inproceedings{PietrzykPalmBeyer, author = {Pietrzyk, Uwe and Palm, Christoph and Beyer, Thomas}, title = {Investigation of fusion strategies of multi-modality images}, series = {IEEE Nuclear Science Symposium Conference Record}, volume = {4}, booktitle = {IEEE Nuclear Science Symposium Conference Record}, doi = {10.1109/NSSMIC.2004.1462740}, pages = {2399 -- 2401}, abstract = {Presenting images from different modalities seems to be a trivial task considering the challenges of obtaining registered images as a pre-requisite for image fusion. In combined tomographs like PET/CT, image registration is intrinsic. However, informative image fusion mandates careful preparation owing to the large amount of information that is presented to the observer. In complex imaging situations, it is necessary to provide tools that are easy to handle and still powerful enough to help the observer discriminate important details from background patterns. We investigated several options for color tables applied to brain and non-brain images obtained with PET, MRI and CT.}, language = {en} }
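Editorial note: several abstracts above report the same family of evaluation metrics (Intersection over Union/Tanimoto, Dice score, sensitivity, specificity, F1). As an illustrative aside only, not code taken from any of the cited works, the following minimal Python sketch shows how these quantities are commonly computed; the function names and the toy masks are our own assumptions, placed after the last record so the BibTeX entries above remain parseable.

# Minimal sketch (illustrative, not from any cited work): overlap and
# classification metrics as reported in several abstracts above.

def iou(pred, truth):
    # Intersection over Union (Jaccard/Tanimoto index) of two pixel sets.
    union = len(pred | truth)
    return len(pred & truth) / union if union else 1.0

def dice(pred, truth):
    # Dice score of two pixel sets; per pair, Dice = 2*IoU / (1 + IoU).
    total = len(pred) + len(truth)
    return 2 * len(pred & truth) / total if total else 1.0

def sens_spec_f1(tp, fp, tn, fn):
    # Sensitivity (recall), specificity and F1 from confusion counts.
    sens = tp / (tp + fn) if tp + fn else 0.0
    spec = tn / (tn + fp) if tn + fp else 0.0
    prec = tp / (tp + fp) if tp + fp else 0.0
    f1 = 2 * prec * sens / (prec + sens) if prec + sens else 0.0
    return sens, spec, f1

# Toy example: a 10x10 predicted mask shifted by 2 pixels against its truth.
pred = {(x, y) for x in range(10) for y in range(10)}
truth = {(x + 2, y) for x in range(10) for y in range(10)}
print(round(iou(pred, truth), 3), round(dice(pred, truth), 3))  # 0.667 0.8

For a single mask pair, Dice and IoU are tied by Dice = 2*IoU/(1+IoU); an IoU of 0.63 maps to a Dice of roughly 0.77, consistent with the jointly reported 63% IoU and 76% Dice in one of the abstracts above (means over a dataset follow this relation only approximately).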