@article{ZhangFegerDullenkopfetal.2024,
  author   = {Zhang and Feger, Sebastian and Dullenkopf and Liao and S{\"u}sslin and Liu and Butz},
  title    = {Beyond Recommendations: From Backward to Forward AI Support of Pilots' Decision-Making Process},
  journal  = {Proceedings of the ACM on Human-Computer Interaction},
  volume   = {8},
  number   = {CSCW2},
  doi      = {10.1145/3687024},
  year     = {2024},
  abstract = {AI is anticipated to enhance human decision-making in high-stakes domains like aviation, but adoption is often hindered by challenges such as inappropriate reliance and poor alignment with users' decision-making. Recent research suggests that a core underlying issue is the recommendation-centric design of many AI systems, i.e., they give end-to-end recommendations and ignore the rest of the decision-making process. Alternative support paradigms are rare, and it remains unclear how the few that do exist compare to recommendation-centric support. In this work, we aimed to empirically compare recommendation-centric support to an alternative paradigm, continuous support, in the context of diversions in aviation. We conducted a mixed-methods study with 32 professional pilots in a realistic setting. To ensure the quality of our study scenarios, we conducted a focus group with four additional pilots prior to the study. We found that continuous support can aid pilots' decision-making in a forward direction, allowing them to think beyond the limits of the system and make faster decisions when combined with recommendations, though the forward support can be disrupted. Participants' statements further suggest a shift in design goal away from providing recommendations toward supporting quick information gathering. Our results show ways to design more helpful and effective AI decision support that goes beyond end-to-end recommendations.},
  language = {en}
}

@inproceedings{MuchaRobertBreitschwerdtetal.2021,
  author    = {Mucha, Henrik and Robert, Sebastian and Breitschwerdt, Ruediger and Fellmann, Michael},
  title     = {Interfaces for Explanations in Human-AI Interaction: Proposing a Design Evaluation Approach},
  booktitle = {Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems},
  publisher = {Association for Computing Machinery},
  address   = {New York, NY, USA},
  pages     = {327},
  year      = {2021},
  abstract  = {Explanations in Human-AI Interaction are communicated to human decision makers through interfaces. Yet, it is not clear what consequences the exact representation of such explanations as part of decision support systems (DSS) operating on machine learning (ML) models has on human decision making. We observe a need for research methods that allow for measuring the effect different eXplainable AI (XAI) interface designs have on people's decision making. In this paper, we argue for adopting research approaches from decision theory for HCI research on XAI interface design. We outline how we used estimation tasks in human-grounded design research to introduce a method and measurement for collecting evidence on XAI interface effects. To this end, we investigated representations of LIME explanations in an online estimation-task study as a proof of concept for our proposal.},
  language  = {en}
}