@inproceedings{RosbachAmmelingKruegeletal.2025,
  author = {Rosbach, Emely and Ammeling, Jonas and Kr{\"u}gel, Sebastian and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Puget, Chlo{\'e} and Donovan, Taryn and Klang, Andrea and K{\"o}ller, Maximilian C. and Bolfa, Pompei and Tecilla, Marco and Denk, Daniela and Kiupel, Matti and Paraschou, Georgios and Kok, Mun Keong and Haake, Alexander F. H. and de Krijger, Ronald R. and Sonnen, Andreas F.-P. and Kasantikul, Tanit and Dorrestein, Gerry M. and Smedley, Rebecca C. and Stathonikos, Nikolas and Uhl, Matthias and Bertram, Christof and Riener, Andreas and Aubreville, Marc},
  title = {"When Two Wrongs Don't Make a Right" - Examining Confirmation Bias and the Role of Time Pressure During Human-AI Collaboration in Computational Pathology},
  pages = {528},
  booktitle = {CHI'25: Proceedings of the 2025 CHI Conference on Human Factors in Computing Systems},
  editor = {Yamashita, Naomi and Evers, Vanessa and Yatani, Koji and Ding, Xianghua and Lee, Bongshin and Chetty, Marshini and Toups-Dugas, Phoebe},
  publisher = {ACM},
  address = {New York},
  isbn = {979-8-4007-1394-1},
  doi = {10.1145/3706598.3713319},
  year = {2025},
  abstract = {Artificial intelligence (AI)-based decision support systems hold promise for enhancing diagnostic accuracy and efficiency in computational pathology. However, human-AI collaboration can introduce and amplify cognitive biases, like confirmation bias caused by false confirmation when erroneous human opinions are reinforced by inaccurate AI output. This bias may increase under time pressure, a ubiquitous factor in routine pathology, as it strains practitioners' cognitive resources. We quantified confirmation bias triggered by AI-induced false confirmation and examined the role of time constraints in a web-based experiment, where trained pathology experts (n=28) estimated tumor cell percentages. Our results suggest that AI integration fuels confirmation bias, evidenced by a statistically significant positive linear mixed-effects model coefficient linking AI recommendations that mirror flawed human judgment to alignment with system advice. Conversely, time pressure appeared to weaken this relationship. These findings highlight potential risks of AI in healthcare and aim to support the safe integration of clinical decision support systems.},
  language = {en}
}

@unpublished{RosbachAmmelingKruegeletal.2024,
  author = {Rosbach, Emely and Ammeling, Jonas and Kr{\"u}gel, Sebastian and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Puget, Chlo{\'e} and Donovan, Taryn and Klang, Andrea and K{\"o}ller, Maximilian C. and Bolfa, Pompei and Tecilla, Marco and Denk, Daniela and Kiupel, Matti and Paraschou, Georgios and Kok, Mun Keong and Haake, Alexander F. H. and de Krijger, Ronald R. and Sonnen, Andreas F.-P. and Kasantikul, Tanit and Dorrestein, Gerry M. and Smedley, Rebecca C. and Stathonikos, Nikolas and Uhl, Matthias and Bertram, Christof and Riener, Andreas and Aubreville, Marc},
  title = {"When Two Wrongs Don't Make a Right" - Examining Confirmation Bias and the Role of Time Pressure During Human-AI Collaboration in Computational Pathology},
  publisher = {arXiv},
  address = {Ithaca},
  doi = {10.48550/arXiv.2411.01007},
  year = {2024},
  language = {en}
}

@inproceedings{AmmelingMangerKwakaetal.2023,
  author = {Ammeling, Jonas and Manger, Carina and Kwaka, Elias and Kr{\"u}gel, Sebastian and Uhl, Matthias and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Riener, Andreas and Bertram, Christof and Breininger, Katharina and Aubreville, Marc},
  title = {Appealing but Potentially Biasing - Investigation of the Visual Representation of Segmentation Predictions by AI Recommender Systems for Medical Decision Making},
  booktitle = {Mensch und Computer 2023: Building Bridges: Tagungsband (Proceedings)},
  editor = {Stolze, Markus and Loch, Frieder and Baldauf, Matthias and Alt, Florian and Schneegass, Christina and Kosch, Thomas and Hirzle, Teresa and Sadeghian, Shadan and Draxler, Fiona and Bektas, Kenan and Lohan, Katrin and Knierim, Pascal},
  publisher = {ACM},
  address = {New York},
  isbn = {979-8-4007-0771-1},
  doi = {10.1145/3603555.3608561},
  pages = {330--335},
  year = {2023},
  language = {en}
}

@article{RosbachAmmelingGanzetal.2026,
  author = {Rosbach, Emely and Ammeling, Jonas and Ganz, Jonathan and Bertram, Christof and Conrad, Thomas and Riener, Andreas and Aubreville, Marc},
  title = {Stuck on Suggestions: Automation Bias, the Anchoring Effect, and the Factors That Shape Them in Computational Pathology},
  journal = {Machine Learning for Biomedical Imaging},
  volume = {3},
  number = {MELBA-BVM 2025 Special Issue},
  pages = {126--147},
  note = {Article 2026:007},
  publisher = {Melba editors},
  address = {[s. l.]},
  issn = {2766-905X},
  doi = {10.59275/j.melba.2026-87b1},
  year = {2026},
  abstract = {Artificial intelligence (AI)-driven clinical decision support systems (CDSS) hold promise to improve diagnostic accuracy and efficiency in computational pathology. However, collaboration between human experts and AI may give rise to cognitive biases, such as automation and anchoring bias, wherein users may be inclined to blindly adopt system recommendations or be disproportionately influenced by the presence of AI predictions, even when they are inaccurate. These biases may be exacerbated under time pressure, pervasive in routine pathology diagnostics, or shaped by individual user characteristics. To investigate these effects, we conducted a web-based experiment in which trained pathology experts (n = 28) estimated tumor cell percentages twice: once independently and once with the aid of an AI. A subset of the estimates in each condition was performed under time constraints. Our findings indicate that AI integration generally enhances diagnostic performance. However, it also introduced a 7\% automation bias rate, quantified as the proportion of accepted negative consultations, where a previously correct independent assessment is overturned by inaccurate AI guidance. While time pressure did not increase the frequency of automation bias occurrence, it appeared to intensify its severity, as evidenced by a performance decline linked to increased automation reliance under cognitive load. A linear mixed-effects model (LMM) analysis, simulating weighted averaging, revealed a statistically significant positive coefficient for AI advice, indicating a moderate degree of anchoring on system output. This effect was further intensified under time pressure, suggesting that anchoring bias may become more pronounced when cognitive resources are limited. A secondary LMM evaluation assessing automation reliance, used as a proxy for both automation and anchoring bias, demonstrated that professional experience and self-efficacy were associated with reduced dependence on system support, whereas higher confidence during AI-assisted decision-making was linked to increased automation reliance. Together, these findings underscore the dual nature of AI integration in clinical workflows, offering performance benefits while also introducing risks of cognitive bias-driven diagnostic errors. As an initial investigation focused on a single medical specialty and diagnostic task, this study aims to lay the groundwork for future research to explore these phenomena across diverse clinical contexts, ultimately supporting the establishment of appropriate reliance on automated systems and the safe, effective integration of human-AI collaboration in medical decision-making.},
  language = {en}
}
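% A minimal sketch of the weighted-averaging linear mixed-effects model the
% abstracts above refer to; the symbol names are illustrative assumptions,
% not taken from the papers:
%
%   \hat{y}_{ij} = \beta_0 + \beta_1\, y^{\mathrm{ind}}_{ij}
%                + \beta_2\, y^{\mathrm{AI}}_{ij} + u_j + \varepsilon_{ij},
%   \qquad u_j \sim \mathcal{N}(0, \sigma_u^2)
%
% where y^{\mathrm{ind}}_{ij} is expert j's independent tumor cell percentage
% estimate for case i, y^{\mathrm{AI}}_{ij} is the AI recommendation, and u_j
% is a per-expert random intercept. A significantly positive \beta_2
% corresponds to the anchoring on, and alignment with, system advice that the
% abstracts report.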