@article{KruegelUhl2023a,
  author    = {Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title     = {Internal whistleblowing systems without proper sanctions may backfire},
  journal   = {Journal of Business Economics},
  volume    = {93},
  number    = {8},
  pages     = {1355--1383},
  publisher = {Springer},
  address   = {Wiesbaden},
  issn      = {1861-8928},
  doi       = {https://doi.org/10.1007/s11573-023-01144-w},
  year      = {2023},
  abstract  = {Internal whistleblowing systems are supposed to fight misconduct within organizations. Because it is difficult to study their efficacy in the field, scientific evidence on their performance is rare. This is problematic, because these systems bind substantial resources and might generate the erroneous impression of compliance in a company in which misconduct is prevalent. We therefore suggest a versatilely extendable experimental workhorse that allows the systematic study of internal whistleblowing systems in the lab. As a first step, we tested the efficacy of whistleblowing systems if internal punishment for misconduct is mild and hesitant, which is usually the case in practice, as several fraud surveys confirm. Our results show that under these conditions almost nobody blew the whistle, and misconduct occurred even more frequently with than without a whistleblowing system. The institutionalization of whistleblowing seemed to crowd out the intrinsic motivation to act compliantly. Moreover, when a whistleblowing system was either unavailable or not used, misconduct was highly contagious and spread quickly. Yet, when we implemented severe and ensured punishment for misconduct, whistleblowing systems could deter wrongdoing. In such a setting, people were willing to blow the whistle and the prevalence of misconduct dropped substantially. Altogether, our results highlight the interaction between institutions and preferences and can support the design of compliance measures within organizations. For compliance managers a key takeaway is that if companies preach a zero-tolerance policy, they should practice it as well. Otherwise, they might even worsen the situation.},
  language  = {en}
}

@article{KruegelOstermaierUhl2022a,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {Algorithms as partners in crime: A lesson in ethics by design},
  journal   = {Computers in Human Behavior},
  volume    = {138},
  pages     = {107483},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0747-5632},
  doi       = {https://doi.org/10.1016/j.chb.2022.107483},
  year      = {2023},
  abstract  = {The human in the loop is often advocated as a panacea against concerns about AI-powered machines, which increasingly take decisions of consequence in all realms of life. However, can we rely on humans to prevent unethical decisions by machines? We run online experiments modeling both the case where the machine serves as a corrective to the human and where the human serves as a corrective to the machine. Our results suggest that, in the former case, humans make similar decisions whether the corrective is a machine or another human. In the latter case, humans take advantage of rather than correct bad decisions by machines, turning into partners in crime. These findings caution us not to count too much on the human in the loop as a moral corrective.
Instead, they tend to argue for human-machine decision-making where the human makes the decision and the machine is the corrective.},
  language  = {en}
}

@article{KruegelUhl2022a,
  author    = {Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title     = {Is only one of my selves authentic? An empirical approach},
  journal   = {Journal of Behavioral and Experimental Economics},
  volume    = {102},
  pages     = {101971},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {2214-8051},
  doi       = {https://doi.org/10.1016/j.socec.2022.101971},
  year      = {2023},
  abstract  = {In behavioral economics, intrapersonal conflict is predominantly interpreted hierarchically. A "present-biased" intrapersonal doer may spoil the goal of a rational planner. This often suggests paternalistic interventions that help the "true" or "authentic" self to overwhelm its present-biased alter ego. Game theorist Schelling proposed a reciprocal interpretation of intrapersonal conflict that interprets both selves as strategic players, which Elster contradicted by claiming that in any conflict only one self is capable of strategic behavior and therefore authentic. Previous empirical studies, however, cannot test this interpretation, because their design provides commitment devices unilaterally to only one self. In an experiment, we provided commitment devices to both selves and find similar inclinations to use this strategic tool. Given this, the symmetric view on intrapersonal conflict seems no less plausible than the hierarchical one. Our results might contribute to a richer debate on intrapersonal conflict by feeding in some skepticism about the self-evidence with which paternalists take sides.},
  language  = {en}
}

@unpublished{KruegelOstermaierUhl2023a,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {The moral authority of ChatGPT},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {https://doi.org/10.48550/arXiv.2301.07098},
  year      = {2023},
  abstract  = {ChatGPT is not only fun to chat with, but it also searches information, answers questions, and gives advice. With consistent moral advice, it might improve the moral judgment and decisions of users, who often hold contradictory moral beliefs. Unfortunately, ChatGPT turns out highly inconsistent as a moral advisor. Nonetheless, it influences users' moral judgment, we find in an experiment, even if they know they are advised by a chatting bot, and they underestimate how much they are influenced. Thus, ChatGPT threatens to corrupt rather than improve users' judgment. These findings raise the question of how to ensure the responsible use of ChatGPT and similar AI. Transparency is often touted but seems ineffective. We propose training to improve digital literacy.},
  language  = {en}
}

@article{KruegelOstermaierUhl2023b,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {ChatGPT's inconsistent moral advice influences users' judgment},
  journal   = {Scientific Reports},
  volume    = {13},
  pages     = {4569},
  publisher = {Springer Nature},
  address   = {London},
  issn      = {2045-2322},
  doi       = {https://doi.org/10.1038/s41598-023-31341-0},
  year      = {2023},
  abstract  = {ChatGPT is not only fun to chat with, but it also searches information, answers questions, and gives advice. With consistent moral advice, it can improve the moral judgment and decisions of users. Unfortunately, ChatGPT's advice is not consistent.
Nonetheless, it does influence users' moral judgment, we find in an experiment, even if they know they are advised by a chatting bot, and they underestimate how much they are influenced. Thus, ChatGPT corrupts rather than improves its users' moral judgment. While these findings call for better design of ChatGPT and similar bots, we also propose training to improve users' digital literacy as a remedy. Transparency, however, is not sufficient to enable the responsible use of AI.},
  language  = {en}
}

@inproceedings{RosbachAmmelingKruegeletal.2025,
  author    = {Rosbach, Emely and Ammeling, Jonas and Kr{\"u}gel, Sebastian and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Puget, Chlo{\'e} and Donovan, Taryn and Klang, Andrea and K{\"o}ller, Maximilian C. and Bolfa, Pompei and Tecilla, Marco and Denk, Daniela and Kiupel, Matti and Paraschou, Georgios and Kok, Mun Keong and Haake, Alexander F. H. and de Krijger, Ronald R. and Sonnen, Andreas F.-P. and Kasantikul, Tanit and Dorrestein, Gerry M. and Smedley, Rebecca C. and Stathonikos, Nikolas and Uhl, Matthias and Bertram, Christof and Riener, Andreas and Aubreville, Marc},
  title     = {"When Two Wrongs Don't Make a Right" - Examining Confirmation Bias and the Role of Time Pressure During Human-AI Collaboration in Computational Pathology},
  booktitle = {CHI'25: Proceedings of the 2025 CHI Conference on Human Factors in Computing Systems},
  editor    = {Yamashita, Naomi and Evers, Vanessa and Yatani, Koji and Ding, Xianghua and Lee, Bongshin and Chetty, Marshini and Toups-Dugas, Phoebe},
  pages     = {528},
  publisher = {ACM},
  address   = {New York},
  isbn      = {979-8-4007-1394-1},
  doi       = {https://doi.org/10.1145/3706598.3713319},
  year      = {2025},
  abstract  = {Artificial intelligence (AI)-based decision support systems hold promise for enhancing diagnostic accuracy and efficiency in computational pathology. However, human-AI collaboration can introduce and amplify cognitive biases, like confirmation bias caused by false confirmation when erroneous human opinions are reinforced by inaccurate AI output. This bias may increase under time pressure, a ubiquitous factor in routine pathology, as it strains practitioners' cognitive resources. We quantified confirmation bias triggered by AI-induced false confirmation and examined the role of time constraints in a web-based experiment, where trained pathology experts (n=28) estimated tumor cell percentages. Our results suggest that AI integration fuels confirmation bias, evidenced by a statistically significant positive linear mixed-effects model coefficient linking AI recommendations mirroring flawed human judgment and alignment with system advice. Conversely, time pressure appeared to weaken this relationship. These findings highlight potential risks of AI in healthcare and aim to support the safe integration of clinical decision support systems.},
  language  = {en}
}

@article{KruegelUhl2023b,
  author    = {Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title     = {The behavioral economics of dynamically inconsistent behavior: a critical assessment},
  journal   = {Social Choice and Welfare},
  volume    = {61},
  number    = {4},
  pages     = {817--833},
  publisher = {Springer Nature},
  address   = {Berlin},
  issn      = {0176-1714},
  doi       = {https://doi.org/10.1007/s00355-023-01471-5},
  year      = {2023},
  abstract  = {Preferences often change—even in short time intervals—due to either the mere passage of time (present-biased preferences) or changes in visceral or environmental conditions (state-dependent preferences).
On the basis of empirical findings concerning state-dependent preferences, we critically discuss the "Aristotelian" view of unitary decision makers in economics. We illustrate that the conceptualization of preferences as "present-biased" as opposed to "state-dependent" has very different normative implications for which preferences should be considered "rational." Empirically, however, the two concepts are very difficult to distinguish. The economist can justify any paternalistic intervention if she can conceptualize changing preferences so flexibly, and she can easily become a benevolent despot. We therefore urge for a more careful "Heraclitean" view of decision-making that accepts that a person may consist of multiple selves.},
  language  = {en}
}

@article{KruegelOstermaierUhl2022b,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {Zombies in the Loop? Humans Trust Untrustworthy AI-Advisors for Ethical Decisions},
  journal   = {Philosophy \& Technology},
  volume    = {35},
  number    = {1},
  pages     = {17},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {2210-5441},
  doi       = {https://doi.org/10.1007/s13347-022-00511-9},
  year      = {2022},
  abstract  = {Departing from the claim that AI needs to be trustworthy, we find that ethical advice from an AI-powered algorithm is trusted even when its users know nothing about its training data and when they learn information about it that warrants distrust. We conducted online experiments where the subjects took the role of decision-makers who received advice from an algorithm on how to deal with an ethical dilemma. We manipulated the information about the algorithm and studied its influence. Our findings suggest that AI is overtrusted rather than distrusted. We suggest digital literacy as a potential remedy to ensure the responsible use of AI.},
  language  = {en}
}

@article{KruegelUhl2024,
  author    = {Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title     = {The risk ethics of autonomous vehicles: an empirical approach},
  journal   = {Scientific Reports},
  volume    = {14},
  pages     = {960},
  publisher = {Springer Nature},
  address   = {London},
  issn      = {2045-2322},
  doi       = {https://doi.org/10.1038/s41598-024-51313-2},
  year      = {2024},
  abstract  = {How would people distribute risks of autonomous vehicles (AVs) in everyday road traffic? The rich literature on the ethics of AVs revolves around moral judgments in unavoidable collision scenarios. We argue for extending the debate to driving behaviors in everyday road traffic where ubiquitous ethical questions arise due to the permanent redistribution of risk among road users. This distribution of risks raises ethically relevant questions that cannot be evaded by simple heuristics such as "hitting the brakes." Using an interactive, graphical representation of different traffic situations, we measured participants' preferences on driving maneuvers of AVs in a representative survey in Germany. Our participants' preferences deviated significantly from mere collision avoidance. Interestingly, our participants were willing to take risks themselves for the benefit of other road users, suggesting that the social dilemma of AVs may be mitigated in risky environments.
Our research might build a bridge between engineers and philosophers to discuss the ethics of AVs more constructively.},
  language  = {en}
}

@unpublished{KruegelUhl2022b,
  author    = {Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title     = {The risk ethics of autonomous vehicles: a continuous trolley problem in regular road traffic},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {https://doi.org/10.48550/arXiv.2206.03258},
  year      = {2022},
  abstract  = {Is the ethics of autonomous vehicles (AVs) restricted to weighing lives in unavoidable accidents? We argue that AVs distribute risks between road users in regular traffic situations, either explicitly or implicitly. This distribution of risks raises ethically relevant questions that cannot be evaded by simple heuristics such as "hitting the brakes." Using an interactive, graphical representation of different traffic situations, we measured participants' preferences on driving maneuvers of AVs in a representative survey in Germany. Our participants' preferences deviated significantly from mere collision avoidance. Interestingly, our participants were willing to take risks themselves for the benefit of other road users, suggesting that the social dilemma of AVs may lessen in a context of risk.},
  language  = {en}
}