@article{KatariaLevatiUhl2014,
  author = {Kataria, Mitesh and Levati, Maria Vittoria and Uhl, Matthias},
  title = {Paternalism with hindsight},
  subtitle = {do prot{\'e}g{\'e}s react consequentialistically to paternalism?},
  volume = {43},
  number = {3},
  journal = {Social Choice and Welfare},
  publisher = {Springer},
  address = {Berlin},
  issn = {1432-217X},
  doi = {10.1007/s00355-014-0800-4},
  pages = {731--746},
  year = {2014},
  language = {en}
}

@inbook{SchoenmannUhl2023,
  author = {Sch{\"o}nmann, Manuela and Uhl, Matthias},
  title = {Eine ethische Perspektive auf KI in der Bildung},
  booktitle = {K{\"u}nstliche Intelligenz in der Bildung},
  editor = {de Witt, Claudia and Gloerfeld, Christina and Wrede, Silke Elisabeth},
  publisher = {Springer VS},
  address = {Wiesbaden},
  isbn = {978-3-658-40079-8},
  doi = {10.1007/978-3-658-40079-8_21},
  pages = {433--453},
  year = {2023},
  language = {de}
}

@unpublished{RosbachAmmelingKruegeletal.2024,
  author = {Rosbach, Emely and Ammeling, Jonas and Kr{\"u}gel, Sebastian and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Puget, Chlo{\'e} and Donovan, Taryn and Klang, Andrea and K{\"o}ller, Maximilian C. and Bolfa, Pompei and Tecilla, Marco and Denk, Daniela and Kiupel, Matti and Paraschou, Georgios and Kok, Mun Keong and Haake, Alexander F. H. and de Krijger, Ronald R. and Sonnen, Andreas F.-P. and Kasantikul, Tanit and Dorrestein, Gerry M. and Smedley, Rebecca C. and Stathonikos, Nikolas and Uhl, Matthias and Bertram, Christof and Riener, Andreas and Aubreville, Marc},
  title = {``When Two Wrongs Don't Make a Right'' -- Examining Confirmation Bias and the Role of Time Pressure During Human-AI Collaboration in Computational Pathology},
  publisher = {arXiv},
  address = {Ithaca},
  doi = {10.48550/arXiv.2411.01007},
  year = {2024},
  language = {en}
}

@article{BodenschatzUhlWalkowitz2021,
  author = {Bodenschatz, Anja and Uhl, Matthias and Walkowitz, Gari},
  title = {Autonomous systems in ethical dilemmas},
  subtitle = {attitudes toward randomization},
  volume = {4},
  journal = {Computers in Human Behavior Reports},
  publisher = {Elsevier},
  address = {Amsterdam},
  issn = {2451-9588},
  doi = {10.1016/j.chbr.2021.100145},
  pages = {100145},
  year = {2021},
  abstract = {It is ethically debatable whether autonomous systems should be programmed to actively impose harm on some to avoid greater harm for others. Surveys on ethical dilemmas in self-driving cars' programming have shown that people favor imposing harm on some people to save others from suffering and are consequently willing to sacrifice smaller groups to save larger ones in unavoidable accident situations. This holds, however, only if people are forced to impose harm directly. Unlike humans, autonomous systems feature a salient deontological alternative for immediate decisions: the ability to randomize decisions over dilemmatic outcomes. To be applicable in democracies, randomization must correspond to people's moral intuition. In three studies (N = 935), we present empirical evidence that many people prefer to randomize between dilemmatic outcomes due to moral considerations. We find these preferences in hypothetical and incentivized decision-making situations.
We also find that these preferences are robust across different contexts and hold both in Germany, with its Kantian cultural tradition, and in the US, with its utilitarian cultural tradition.},
  language = {en}
}

@article{GrundherrJauernigUhl2021,
  author = {von Grundherr, Michael and Jauernig, Johanna and Uhl, Matthias},
  title = {To condemn is not to punish},
  subtitle = {an experiment on hypocrisy},
  volume = {12},
  number = {2},
  journal = {Games},
  publisher = {MDPI},
  address = {Basel},
  issn = {2073-4336},
  doi = {10.3390/g12020038},
  pages = {38},
  year = {2021},
  abstract = {Hypocrisy is the act of claiming moral standards to which one's own behavior does not conform. Instances of hypocrisy, such as the supposedly green furnishing group IKEA selling furniture made from illegally felled wood, are frequently reported in the media. In a controlled and incentivized experiment, we investigate how observers rate different types of hypocritical behavior and whether this judgment also translates into punishment. Results show that observers do, indeed, condemn hypocritical behavior strongly. The aversion to deceptive behavior is, in fact, so strong that even purely self-deceptive behavior is regarded as blameworthy. Observers who score high in the moral identity test have particularly strong reactions to acts of hypocrisy. The moral condemnation of hypocritical behavior, however, fails to produce a proportional amount of punishment. Punishment seems to be driven more by the violation of the norm of fair distribution than by moral pretense. From the viewpoint of positive retributivism, it is problematic if neither formal nor informal punishment follows moral condemnation.},
  language = {en}
}

@article{MigrowUhl2011,
  author = {Migrow, Dimitri and Uhl, Matthias},
  title = {The Resolution Game},
  subtitle = {A Dual Selves Perspective},
  volume = {2},
  number = {4},
  journal = {Games},
  publisher = {MDPI},
  address = {Basel},
  issn = {2073-4336},
  doi = {10.3390/g2040452},
  pages = {452--462},
  year = {2011},
  abstract = {This article explains the emergence of a unique equilibrium resolution as the result of a compromise between two selves with different preferences. The stronger this difference is, the more generous the resolution becomes. This result contrasts with the predictions of other models, in which sinful consumption is distributed bimodally. Our result therefore fits better with everyday observations of the many ambivalent goods for which we often form nonrigid resolutions. The normative analysis uses the device of a hypothetical impartial self that regards both conflicting motives as equally legitimate. The result of this analysis is dilemmatic: it demonstrates that the resolution is broken too often to be welfare maximal, yet the introduction of external self-commitment devices results in their overuse and is welfare decreasing.},
  language = {en}
}

@inbook{Uhl2022,
  author = {Uhl, Matthias},
  title = {Order Ethics},
  booktitle = {Evolving Business Ethics},
  booksubtitle = {A Contemporary Ethics for the Digital Society},
  editor = {L{\"u}tge, Christoph and Thejls Ziegler, Marianne},
  publisher = {J.B. Metzler},
  address = {Stuttgart},
  isbn = {978-3-476-05844-7},
  doi = {10.1007/978-3-476-05845-4_7},
  pages = {93--98},
  year = {2022},
  language = {en}
}

@article{AmmelingAubrevilleFritzetal.2024,
  author = {Ammeling, Jonas and Aubreville, Marc and Fritz, Alexis and Kießig, Angelika and Kr{\"u}gel, Sebastian and Uhl, Matthias},
  title = {An interdisciplinary perspective on AI-supported decision making in medicine},
  volume = {81},
  journal = {Technology in Society},
  publisher = {Elsevier},
  address = {Amsterdam},
  issn = {1879-3274},
  doi = {10.1016/j.techsoc.2024.102791},
  pages = {102791},
  year = {2024},
  abstract = {Artificial intelligence (AI)-supported medical diagnosis offers the potential to utilize the collaborative intelligence of context-sensitive humans and narrowly focused machines for patients' benefit. The employment of machine-learning-based decision-support systems (MLDSS) in medicine, however, raises important multidisciplinary challenges that cannot be addressed in isolation. We discuss three disciplinary perspectives on the topic and their interplay. Ethical issues arise at the level of changing responsibility structures in healthcare. Behavioral issues relate to the actual impact that the system has on physicians. Technical issues arise with respect to the training of a machine learning (ML) model that gives accurate advice. We argue that the interaction between physicians and MLDSS, including the concrete design of the interface in which this interaction occurs, can only be considered at the intersection of all three disciplines.},
  language = {en}
}

@inproceedings{KruegelRichterUhl2025,
  author = {Kr{\"u}gel, Sebastian and Richter, Florian and Uhl, Matthias},
  title = {Context-Dependency of Trust in AI-based Systems},
  booktitle = {2025 IEEE International Symposium on Technology and Society (ISTAS 2025)},
  publisher = {IEEE},
  address = {Piscataway},
  isbn = {979-8-3315-9597-5},
  doi = {10.1109/ISTAS65609.2025.11269654},
  year = {2025},
  language = {en}
}

@inproceedings{AmmelingMangerKwakaetal.2023,
  author = {Ammeling, Jonas and Manger, Carina and Kwaka, Elias and Kr{\"u}gel, Sebastian and Uhl, Matthias and Kießig, Angelika and Fritz, Alexis and Ganz, Jonathan and Riener, Andreas and Bertram, Christof and Breininger, Katharina and Aubreville, Marc},
  title = {Appealing but Potentially Biasing},
  subtitle = {Investigation of the Visual Representation of Segmentation Predictions by AI Recommender Systems for Medical Decision Making},
  booktitle = {Mensch und Computer 2023: Building Bridges: Tagungsband (Proceedings)},
  editor = {Stolze, Markus and Loch, Frieder and Baldauf, Matthias and Alt, Florian and Schneegass, Christina and Kosch, Thomas and Hirzle, Teresa and Sadeghian, Shadan and Draxler, Fiona and Bektas, Kenan and Lohan, Katrin and Knierim, Pascal},
  publisher = {ACM},
  address = {New York},
  isbn = {979-8-4007-0771-1},
  doi = {10.1145/3603555.3608561},
  pages = {330--335},
  year = {2023},
  language = {en}
}

@article{KruegelAmmelingAubrevilleetal.2024,
  author = {Kr{\"u}gel, Sebastian and Ammeling, Jonas and Aubreville, Marc and Fritz, Alexis and Kießig, Angelika and Uhl, Matthias},
  title = {Perceived responsibility in AI-supported medicine},
  volume = {40},
  journal = {AI \& Society: Journal of Knowledge, Culture and Communication},
  publisher = {Springer},
  address = {London},
  issn = {1435-5655},
  doi = {10.1007/s00146-024-01972-6},
  pages = {1485--1495},
  year = {2024},
  abstract = {In a representative vignette
study in Germany with 1,653 respondents, we investigated laypeople's attribution of moral responsibility in collaborative medical diagnosis. Specifically, we compare people's judgments in a setting in which physicians are supported by an AI-based recommender system with their judgments in a setting in which physicians are supported by a human colleague. It turns out that people tend to attribute moral responsibility to the artificial agent, although this is traditionally considered a category mistake in normative ethics. This tendency is stronger when people believe that AI may become conscious at some point. Consequently, less responsibility is attributed to human agents in settings with hybrid diagnostic teams than in settings with human-only diagnostic teams. Our findings may have implications for behavior in contexts of collaborative medical decision making with AI-based as opposed to human recommenders, because less responsibility is attributed to the agents who have the mental capacity to care about outcomes.},
  language = {en}
}