@article{KruegelOstermaierUhl2022a,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {Algorithms as partners in crime: A lesson in ethics by design},
  journal   = {Computers in Human Behavior},
  volume    = {138},
  pages     = {107483},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0747-5632},
  doi       = {10.1016/j.chb.2022.107483},
  year      = {2022},
  abstract  = {The human in the loop is often advocated as a panacea against concerns about AI-powered machines, which increasingly take decisions of consequence in all realms of life. However, can we rely on humans to prevent unethical decisions by machines? We run online experiments modeling both the case where the machine serves as a corrective to the human and where the human serves as a corrective to the machine. Our results suggest that, in the former case, humans make similar decisions whether the corrective is a machine or another human. In the latter case, humans take advantage of rather than correct bad decisions by machines, turning into partners in crime. These findings caution us not to count too much on the human in the loop as a moral corrective. Instead, they tend to argue for human-machine decision-making where the human makes the decision and the machine is the corrective.},
  language  = {en}
}

@unpublished{KruegelOstermaierUhl2023a,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {The moral authority of ChatGPT},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {10.48550/arXiv.2301.07098},
  year      = {2023},
  abstract  = {ChatGPT is not only fun to chat with, but it also searches information, answers questions, and gives advice. With consistent moral advice, it might improve the moral judgment and decisions of users, who often hold contradictory moral beliefs. Unfortunately, ChatGPT turns out highly inconsistent as a moral advisor. Nonetheless, it influences users' moral judgment, we find in an experiment, even if they know they are advised by a chatting bot, and they underestimate how much they are influenced. Thus, ChatGPT threatens to corrupt rather than improve users' judgment. These findings raise the question of how to ensure the responsible use of ChatGPT and similar AI. Transparency is often touted but seems ineffective. We propose training to improve digital literacy.},
  language  = {en}
}

@article{KruegelOstermaierUhl2023b,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {ChatGPT's inconsistent moral advice influences users' judgment},
  journal   = {Scientific Reports},
  volume    = {13},
  pages     = {4569},
  publisher = {Springer Nature},
  address   = {London},
  issn      = {2045-2322},
  doi       = {10.1038/s41598-023-31341-0},
  year      = {2023},
  abstract  = {ChatGPT is not only fun to chat with, but it also searches information, answers questions, and gives advice. With consistent moral advice, it can improve the moral judgment and decisions of users. Unfortunately, ChatGPT's advice is not consistent. Nonetheless, it does influence users' moral judgment, we find in an experiment, even if they know they are advised by a chatting bot, and they underestimate how much they are influenced. Thus, ChatGPT corrupts rather than improves its users' moral judgment. While these findings call for better design of ChatGPT and similar bots, we also propose training to improve users' digital literacy as a remedy.
Transparency, however, is not sufficient to enable the responsible use of AI.},
  language  = {en}
}

@article{KruegelOstermaierUhl2022b,
  author    = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title     = {Zombies in the Loop? Humans Trust Untrustworthy AI-Advisors for Ethical Decisions},
  journal   = {Philosophy \& Technology},
  volume    = {35},
  number    = {1},
  pages     = {17},
  publisher = {Springer},
  address   = {Dordrecht},
  issn      = {2210-5441},
  doi       = {10.1007/s13347-022-00511-9},
  year      = {2022},
  abstract  = {Departing from the claim that AI needs to be trustworthy, we find that ethical advice from an AI-powered algorithm is trusted even when its users know nothing about its training data and when they learn information about it that warrants distrust. We conducted online experiments where the subjects took the role of decision-makers who received advice from an algorithm on how to deal with an ethical dilemma. We manipulated the information about the algorithm and studied its influence. Our findings suggest that AI is overtrusted rather than distrusted. We suggest digital literacy as a potential remedy to ensure the responsible use of AI.},
  language  = {en}
}