@article{JauernigUhlWalkowitz2022,
  author = {Jauernig, Johanna and Uhl, Matthias and Walkowitz, Gari},
  title = {People prefer moral discretion to algorithms},
  subtitle = {algorithm aversion beyond intransparency},
  journal = {Philosophy \& Technology},
  volume = {35},
  number = {1},
  pages = {2},
  publisher = {Springer Nature},
  address = {Cham},
  issn = {2210-5441},
  doi = {10.1007/s13347-021-00495-y},
  year = {2022},
  abstract = {We explore aversion to the use of algorithms in moral decision-making. So far, this aversion has been explained mainly by the fear of opaque decisions that are potentially biased. Using incentivized experiments, we study which role the desire for human discretion in moral decision-making plays. This seems justified in light of evidence suggesting that people might not doubt the quality of algorithmic decisions, but still reject them. In our first study, we found that people prefer humans with decision-making discretion to algorithms that rigidly apply exogenously given human-created fairness principles to specific cases. In the second study, we found that people do not prefer humans to algorithms because they appreciate flesh-and-blood decision-makers per se, but because they appreciate humans' freedom to transcend fairness principles at will. Our results contribute to a deeper understanding of algorithm aversion. They indicate that emphasizing the transparency of algorithms that clearly follow fairness principles might not be the only element for fostering societal algorithm acceptance and suggest reconsidering certain features of the decision-making process.},
  language = {en}
}

@article{FeierGogollUhl2022,
  author = {Feier, Till and Gogoll, Jan and Uhl, Matthias},
  title = {Hiding Behind Machines: Artificial Agents May Help to Evade Punishment},
  journal = {Science and Engineering Ethics},
  volume = {28},
  number = {2},
  pages = {19},
  publisher = {Springer Nature},
  address = {Cham},
  issn = {1471-5546},
  doi = {10.1007/s11948-022-00372-7},
  year = {2022},
  abstract = {The transfer of tasks with sometimes far-reaching implications to autonomous systems raises a number of ethical questions. In addition to fundamental questions about the moral agency of these systems, behavioral issues arise. We investigate the empirically accessible question of whether the imposition of harm by an agent is systematically judged differently when the agent is artificial and not human. The results of a laboratory experiment suggest that decision-makers can actually avoid punishment more easily by delegating to machines than by delegating to other people. Our results imply that the availability of artificial agents could provide stronger incentives for decision-makers to delegate sensitive decisions.},
  language = {en}
}

@article{KruegelOstermaierUhl2022a,
  author = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title = {Algorithms as partners in crime: A lesson in ethics by design},
  journal = {Computers in Human Behavior},
  volume = {138},
  pages = {107483},
  publisher = {Elsevier},
  address = {Amsterdam},
  issn = {0747-5632},
  doi = {10.1016/j.chb.2022.107483},
  year = {2023},
  abstract = {The human in the loop is often advocated as a panacea against concerns about AI-powered machines, which increasingly take decisions of consequence in all realms of life. However, can we rely on humans to prevent unethical decisions by machines?
We run online experiments modeling both the case where the machine serves as a corrective to the human and where the human serves as a corrective to the machine. Our results suggest that, in the former case, humans make similar decisions whether the corrective is a machine or another human. In the latter case, humans take advantage of rather than correct bad decisions by machines, turning into partners in crime. These findings caution us not to count too much on the human in the loop as a moral corrective. Instead, they tend to argue for human-machine decision-making where the human makes the decision and the machine is the corrective.},
  language = {en}
}

@article{KruegelUhl2022a,
  author = {Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title = {Is only one of my selves authentic? An empirical approach},
  journal = {Journal of Behavioral and Experimental Economics},
  volume = {102},
  pages = {101971},
  publisher = {Elsevier},
  address = {Amsterdam},
  issn = {2214-8051},
  doi = {10.1016/j.socec.2022.101971},
  year = {2023},
  abstract = {In behavioral economics, intrapersonal conflict is predominantly interpreted hierarchically. A "present-biased" intrapersonal doer may spoil the goal of a rational planner. This often suggests paternalistic interventions that help the "true" or "authentic" self to overwhelm its present-biased alter ego. Game theorist Schelling proposed a reciprocal interpretation of intrapersonal conflict that interprets both selves as strategic players, which Elster contradicted by claiming that in any conflict only one self is capable of strategic behavior and therefore authentic. Previous empirical studies, however, cannot test this interpretation, because their design provides commitment devices unilaterally to only one self. In an experiment, we provided commitment devices to both selves and find similar inclinations to use this strategic tool. Given this, the symmetric view on intrapersonal conflict seems no less plausible than the hierarchical one. Our results might contribute to a richer debate on intrapersonal conflict by feeding in some skepticism about the self-evidence with which paternalists take sides.},
  language = {en}
}

@article{KruegelOstermaierUhl2022b,
  author = {Kr{\"u}gel, Sebastian and Ostermaier, Andreas and Uhl, Matthias},
  title = {Zombies in the Loop? Humans Trust Untrustworthy AI-Advisors for Ethical Decisions},
  journal = {Philosophy \& Technology},
  volume = {35},
  number = {1},
  pages = {17},
  publisher = {Springer},
  address = {Dordrecht},
  issn = {2210-5441},
  doi = {10.1007/s13347-022-00511-9},
  year = {2022},
  abstract = {Departing from the claim that AI needs to be trustworthy, we find that ethical advice from an AI-powered algorithm is trusted even when its users know nothing about its training data and when they learn information about it that warrants distrust. We conducted online experiments where the subjects took the role of decision-makers who received advice from an algorithm on how to deal with an ethical dilemma. We manipulated the information about the algorithm and studied its influence. Our findings suggest that AI is overtrusted rather than distrusted.
We suggest digital literacy as a potential remedy to ensure the responsible use of AI.},
  language = {en}
}

@unpublished{KruegelUhl2022b,
  author = {Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title = {The risk ethics of autonomous vehicles: a continuous trolley problem in regular road traffic},
  publisher = {arXiv},
  address = {Ithaca},
  doi = {10.48550/arXiv.2206.03258},
  year = {2022},
  abstract = {Is the ethics of autonomous vehicles (AVs) restricted to weighing lives in unavoidable accidents? We argue that AVs distribute risks between road users in regular traffic situations, either explicitly or implicitly. This distribution of risks raises ethically relevant questions that cannot be evaded by simple heuristics such as "hitting the brakes." Using an interactive, graphical representation of different traffic situations, we measured participants' preferences on driving maneuvers of AVs in a representative survey in Germany. Our participants' preferences deviated significantly from mere collision avoidance. Interestingly, our participants were willing to take risks themselves for the benefit of other road users, suggesting that the social dilemma of AVs may lessen in a context of risk.},
  language = {en}
}

@inbook{Uhl2022,
  author = {Uhl, Matthias},
  title = {Order Ethics},
  subtitle = {A Contemporary Ethics for the Digital Society},
  booktitle = {Evolving Business Ethics},
  editor = {L{\"u}tge, Christoph and Thejls Ziegler, Marianne},
  publisher = {J.B. Metzler},
  address = {Stuttgart},
  isbn = {978-3-476-05844-7},
  doi = {10.1007/978-3-476-05845-4_7},
  pages = {93--98},
  year = {2022},
  language = {en}
}