@article{JauernigUhl2018, author = {Jauernig, Johanna and Uhl, Matthias}, title = {Spite and preemptive retaliation after tournaments}, volume = {158}, journal = {Journal of Economic Behavior \& Organization}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1879-1751}, doi = {https://doi.org/10.1016/j.jebo.2018.12.001}, pages = {328 -- 336}, year = {2018}, language = {en} } @book{LuetgeUhl2021, author = {L{\"u}tge, Christoph and Uhl, Matthias}, title = {Business ethics}, subtitle = {an economically informed perspective}, publisher = {Oxford University Press}, address = {Oxford}, isbn = {978-0-19-263385-9}, doi = {https://doi.org/10.1093/oso/9780198864776.001.0001}, pages = {x, 332}, year = {2021}, language = {en} } @article{UhlLuetge2018, author = {Uhl, Matthias and L{\"u}tge, Christoph}, title = {Teaching Business Ethics with Experiments}, volume = {15}, journal = {Journal of Business Ethics Education}, publisher = {NeilsonJournals Publishing}, address = {Edinburgh}, issn = {2044-4559}, doi = {https://doi.org/10.5840/jbee20181510}, pages = {203 -- 217}, year = {2018}, language = {en} } @article{GogollUhl2018, author = {Gogoll, Jan and Uhl, Matthias}, title = {Rage against the machine: Automation in the moral domain}, volume = {74}, journal = {Journal of Behavioral and Experimental Economics}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2214-8043}, doi = {https://doi.org/10.1016/j.socec.2018.04.003}, pages = {97 -- 103}, year = {2018}, language = {en} } @article{LevatiUhlZultan2013, author = {Levati, Maria Vittoria and Uhl, Matthias and Zultan, Ro'i}, title = {Imperfect recall and time inconsistencies: an experimental test of the absentminded driver "paradox"}, volume = {43}, journal = {International Journal of Game Theory}, number = {1}, publisher = {Springer}, address = {Heidelberg}, issn = {1432-1270}, doi = {https://doi.org/10.1007/s00182-013-0373-y}, pages = {65 -- 88}, year = {2013}, language = {en} } @article{KatariaLevatiUhl2014, author = {Kataria, Mitesh and Levati, Maria Vittoria and Uhl, Matthias}, title = {Paternalism with hindsight: do prot{\'e}g{\'e}s react consequentialistically to paternalism?}, volume = {43}, journal = {Social Choice and Welfare}, number = {3}, publisher = {Springer}, address = {Berlin}, issn = {1432-217X}, doi = {https://doi.org/10.1007/s00355-014-0800-4}, pages = {731 -- 746}, year = {2014}, language = {en} } @inbook{SchoenmannUhl2023, author = {Sch{\"o}nmann, Manuela and Uhl, Matthias}, title = {Eine ethische Perspektive auf KI in der Bildung}, booktitle = {K{\"u}nstliche Intelligenz in der Bildung}, editor = {de Witt, Claudia and Gloerfeld, Christina and Wrede, Silke Elisabeth}, publisher = {Springer VS}, address = {Wiesbaden}, isbn = {978-3-658-40079-8}, doi = {https://doi.org/10.1007/978-3-658-40079-8_21}, pages = {433 -- 453}, year = {2023}, language = {de} } @unpublished{RosbachAmmelingKruegeletal.2024, author = {Rosbach, Emely and Ammeling, Jonas and Kr{\"u}gel, Sebastian and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Puget, Chlo{\'e} and Donovan, Taryn and Klang, Andrea and K{\"o}ller, Maximilian C. and Bolfa, Pompei and Tecilla, Marco and Denk, Daniela and Kiupel, Matti and Paraschou, Georgios and Kok, Mun Keong and Haake, Alexander F. H. and de Krijger, Ronald R. and Sonnen, Andreas F.-P. and Kasantikul, Tanit and Dorrestein, Gerry M. and Smedley, Rebecca C.
and Stathonikos, Nikolas and Uhl, Matthias and Bertram, Christof and Riener, Andreas and Aubreville, Marc}, title = {"When Two Wrongs Don't Make a Right" - Examining Confirmation Bias and the Role of Time Pressure During Human-AI Collaboration in Computational Pathology}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2411.01007}, year = {2024}, language = {en} } @article{BodenschatzUhlWalkowitz2021, author = {Bodenschatz, Anja and Uhl, Matthias and Walkowitz, Gari}, title = {Autonomous systems in ethical dilemmas}, volume = {4}, pages = {100145}, journal = {Computers in Human Behavior Reports}, subtitle = {attitudes toward randomization}, publisher = {Elsevier}, address = {Amsterdam}, issn = {2451-9588}, doi = {https://doi.org/10.1016/j.chbr.2021.100145}, year = {2021}, abstract = {It is ethically debatable whether autonomous systems should be programmed to actively impose harm on some to avoid greater harm for others. Surveys on ethical dilemmas in self-driving cars' programming have shown that people favor imposing harm on some people to save others from suffering and are consequently willing to sacrifice smaller groups to save larger ones in unavoidable accident situations. This is, if people are forced to directly impose harm. Contrary to humans, autonomous systems feature a salient deontological alternative for immediate decisions: the ability to randomize decisions over dilemmatic outcomes. To be applicable in democracies, randomization must correspond to people's moral intuition. In three studies (N = 935), we present empirical evidence that many people prefer to randomize between dilemmatic outcomes due to moral considerations. We find these preferences in hypothetical and incentivized decision-making situations. We also find that preferences are robust in different contexts and persist across Germany, with its Kantian cultural tradition, and the US, with its utilitarian cultural tradition.}, language = {en} } @article{GrundherrJauernigUhl2021, author = {von Grundherr, Michael and Jauernig, Johanna and Uhl, Matthias}, title = {To condemn is not to punish}, volume = {12}, pages = {38}, journal = {Games}, subtitle = {an experiment on hypocrisy}, number = {2}, publisher = {MDPI}, address = {Basel}, issn = {2073-4336}, doi = {https://doi.org/10.3390/g12020038}, year = {2021}, abstract = {Hypocrisy is the act of claiming moral standards to which one's own behavior does not conform. Instances of hypocrisy, such as the supposedly green furnishing group IKEA's selling of furniture made from illegally felled wood, are frequently reported in the media. In a controlled and incentivized experiment, we investigate how observers rate different types of hypocritical behavior and if this judgment also translates into punishment. Results show that observers do, indeed, condemn hypocritical behavior strongly. The aversion to deceptive behavior is, in fact, so strong that even purely self-deceptive behavior is regarded as blameworthy. Observers who score high in the moral identity test have particularly strong reactions to acts of hypocrisy. The moral condemnation of hypocritical behavior, however, fails to produce a proportional amount of punishment. Punishment seems to be driven more by the violation of the norm of fair distribution than by moral pretense.
From the viewpoint of positive retributivism, it is problematic if neither formal nor informal punishment follows moral condemnation.}, language = {en} } @article{MigrowUhl2011, author = {Migrow, Dimitri and Uhl, Matthias}, title = {The Resolution Game: A Dual Selves Perspective}, volume = {2}, journal = {Games}, number = {4}, publisher = {MDPI}, address = {Basel}, issn = {2073-4336}, doi = {https://doi.org/10.3390/g2040452}, pages = {452 -- 462}, year = {2011}, abstract = {This article explains the emergence of a unique equilibrium resolution as the result of a compromise between two selves with different preferences. The stronger this difference is, the more generous the resolution gets. This result is in contrast to predictions of other models in which sinful consumption is distributed bimodally. Therefore, our result fits better with our daily observations concerning a lot of ambivalent goods where we often form nonrigid resolutions. The normative analysis uses the device of a hypothetical impartial self that regards both conflicting motives as equally legitimate. The result of this analysis is dilemmatic. It demonstrates that the resolution is broken too often to be welfare maximal. However, the introduction of external self-commitment devices results in their overuse and is welfare decreasing.}, language = {en} } @inbook{Uhl2022, author = {Uhl, Matthias}, title = {Order Ethics}, booktitle = {Evolving Business Ethics}, subtitle = {A Contemporary Ethics for the Digital Society}, editor = {L{\"u}tge, Christoph and Ziegler, Thejls}, publisher = {J.B. Metzler}, address = {Stuttgart}, isbn = {978-3-476-05844-7}, doi = {https://doi.org/10.1007/978-3-476-05845-4_7}, pages = {93 -- 98}, year = {2022}, language = {en} } @article{AmmelingAubrevilleFritzetal.2024, author = {Ammeling, Jonas and Aubreville, Marc and Fritz, Alexis and Kießig, Angelika and Kr{\"u}gel, Sebastian and Uhl, Matthias}, title = {An interdisciplinary perspective on AI-supported decision making in medicine}, volume = {81}, pages = {102791}, journal = {Technology in Society}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1879-3274}, doi = {https://doi.org/10.1016/j.techsoc.2024.102791}, year = {2024}, abstract = {Artificial intelligence (AI)-supported medical diagnosis offers the potential to utilize the collaborative intelligence of context-sensitive humans and narrowly focused machines for patients' benefit. The employment of machine-learning-based decision-support systems (MLDSS) in medicine, however, raises important multidisciplinary challenges that cannot be addressed in isolation. We discuss three disciplinary perspectives on the topic and their interplay. Ethical issues arise at the level of changing responsibility structures in healthcare. Behavioral issues relate to the actual impact that the system has on physicians. Technical issues arise with respect to the training of a machine learning (ML) model that gives accurate advice.
We argue that the interaction between physicians and MLDSS including the concrete design of the interface in which this interaction occurs can only be considered at the intersection of all three disciplines.}, language = {en} } @inproceedings{KruegelRichterUhl2025, author = {Kr{\"u}gel, Sebastian and Richter, Florian and Uhl, Matthias}, title = {Context-Dependency of Trust in AI-based Systems}, booktitle = {2025 IEEE International Symposium on Technology and Society (ISTAS 2025)}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3315-9597-5}, doi = {https://doi.org/10.1109/ISTAS65609.2025.11269654}, year = {2025}, language = {en} } @inproceedings{AmmelingMangerKwakaetal.2023, author = {Ammeling, Jonas and Manger, Carina and Kwaka, Elias and Kr{\"u}gel, Sebastian and Uhl, Matthias and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Riener, Andreas and Bertram, Christof and Breininger, Katharina and Aubreville, Marc}, title = {Appealing but Potentially Biasing - Investigation of the Visual Representation of Segmentation Predictions by AI Recommender Systems for Medical Decision Making}, booktitle = {Mensch und Computer 2023: Building Bridges: Tagungsband (Proceedings)}, editor = {Stolze, Markus and Loch, Frieder and Baldauf, Matthias and Alt, Florian and Schneegass, Christina and Kosch, Thomas and Hirzle, Teresa and Sadeghian, Shadan and Draxler, Fiona and Bektas, Kenan and Lohan, Katrin and Knierim, Pascal}, publisher = {ACM}, address = {New York}, isbn = {979-8-4007-0771-1}, doi = {https://doi.org/10.1145/3603555.3608561}, pages = {330 -- 335}, year = {2023}, language = {en} } @article{KruegelAmmelingAubrevilleetal.2024, author = {Kr{\"u}gel, Sebastian and Ammeling, Jonas and Aubreville, Marc and Fritz, Alexis and Kießig, Angelika and Uhl, Matthias}, title = {Perceived responsibility in AI-supported medicine}, volume = {40}, journal = {AI \& Society: Journal of Knowledge, Culture and Communication}, publisher = {Springer}, address = {London}, issn = {1435-5655}, doi = {https://doi.org/10.1007/s00146-024-01972-6}, pages = {1485 -- 1495}, year = {2024}, abstract = {In a representative vignette study in Germany with 1,653 respondents, we investigated laypeople's attribution of moral responsibility in collaborative medical diagnosis. Specifically, we compare people's judgments in a setting in which physicians are supported by an AI-based recommender system to a setting in which they are supported by a human colleague. It turns out that people tend to attribute moral responsibility to the artificial agent, although this is traditionally considered a category mistake in normative ethics. This tendency is stronger when people believe that AI may become conscious at some point. In consequence, less responsibility is attributed to human agents in settings with hybrid diagnostic teams than in settings with human-only diagnostic teams. 
Our findings may have implications for behavior exhibited in contexts of collaborative medical decision making with AI-based as opposed to human recommenders because less responsibility is attributed to agents who have the mental capacity to care about outcomes.}, language = {en} } @article{KruegelUhlBalcombe2021, author = {Kr{\"u}gel, Sebastian and Uhl, Matthias and Balcombe, Bryn}, title = {Automated vehicles and the morality of post-collision behavior}, volume = {23}, journal = {Ethics and Information Technology}, number = {4}, publisher = {Springer}, address = {Dordrecht}, issn = {1572-8439}, doi = {https://doi.org/10.1007/s10676-021-09607-w}, pages = {691 -- 701}, year = {2021}, abstract = {We address the considerations of the European Commission Expert Group on the ethics of connected and automated vehicles regarding data provision in the event of collisions. While human drivers' appropriate post-collision behavior is clearly defined, regulations for automated driving do not provide for collision detection. We agree it is important to systematically incorporate citizens' intuitions into the discourse on the ethics of automated vehicles. Therefore, we investigate whether people expect automated vehicles to behave like humans after an accident, even if this behavior does not directly affect the consequences of the accident. We find that appropriate post-collision behavior substantially influences people's evaluation of the underlying crash scenario. Moreover, people clearly think that automated vehicles can and should record the accident, stop at the site, and call the police. They are even willing to pay for technological features that enable post-collision behavior. Our study might begin a research program on post-collision behavior, enriching the empirically informed study of automated driving ethics that so far exclusively focuses on pre-collision behavior.}, language = {en} } @article{MaxUhl2023, author = {Max, Raphael and Uhl, Matthias}, title = {Moral luck in investment contexts: We consciously find unprofitable investments less moral}, volume = {18}, pages = {e0278677}, journal = {PLOS ONE}, number = {1}, publisher = {PLOS}, address = {San Francisco}, issn = {1932-6203}, doi = {https://doi.org/10.1371/journal.pone.0278677}, year = {2023}, abstract = {Moral luck refers to whether an actor is morally praised or blamed for an action whose outcome they could not influence. In two studies, we investigated the behavioral importance of this phenomenon in the realm of investments, which has become increasingly subject to ethical evaluations. In our first online experiment, we examined whether people's moral evaluation of an investment decision depended on its arbitrary outcome and whether their interpretation of the nature of the decision was driven by this outcome. Our results showed that profitable investments were considered more moral than unprofitable investments. Moreover, profitable investments were labeled "investments" instead of "speculation" or "gambling" more often than unprofitable ones. In our second study, we asked the subjects to assess investments independent of the outcome. After the outcome was announced, the subjects were given the opportunity to reflect and change their initial decision. The results show that people change the moral evaluation and label of investments when told that it had a bad outcome. This observation was stable across different investment contexts. 
These findings suggest that we must be careful with the increasing moralization of investment decisions and be sensitive to our cognitive biases.}, language = {en} } @article{JauernigUhlValentinov2021, author = {Jauernig, Johanna and Uhl, Matthias and Valentinov, Vladislav}, title = {The ethics of corporate hypocrisy: An experimental approach}, volume = {131}, pages = {102757}, journal = {Futures}, publisher = {Elsevier}, address = {Amsterdam}, issn = {0016-3287}, doi = {https://doi.org/10.1016/j.futures.2021.102757}, year = {2021}, abstract = {In the current landscape of management and business ethics scholarship, a prominent type of dissimulation is exemplified by corporate hypocrisy. The concept of corporate hypocrisy brings traditional morality to bear on the institutions of the modern society and thereby emphasizes the contested relationship between the research programs of individual and institutional ethics. Assuming that morality in the modern society resides in institutions rather than individuals, institutional ethics emphasizes limits to the ability of traditional morality to come to terms with the moral complexity of the market economy. The case of corporate hypocrisy shows however that traditional morality nurtures individual sensitivity to immoral behaviors which may undermine the modern institutional fabric theorized by institutional ethics. This argument is supported by our central experimental finding that the moral evaluation of individual and corporate hypocrisy is driven by essentially the same psychological mechanisms. Moreover, the experiment showed that both corporate and individual hypocrisy are condemned more strongly than frankly wrong behavior even if their consequences are identical.}, language = {en} }