@techreport{OlschewskiBluhmFirnkornetal.2023,
  author    = {Olschewski, Detlef and Bluhm, Andr{\'e} and Firnkorn, Joerg and Lucieri, Adriano and Palacio, Sebastian and Streppel, Yeji and Zednik, Carlos and G{\"o}rge, Rebekka and Poretschkin, Maximilian and Becker, Nikolas and Zielke, Thomas and Kruschel, Christian and Neumann-Brosig, Matthias and B{\"a}uerle, Stephen and Eifert, Marton and L{\'o}pez, Erik Martori and Assion, Felix and Seyerlein-Klug, Annegrit and Schmid, Ute and Haufe, Stefan and Gautier, Antoine and Bieringer, Lukas and Besold, Tarek R. and Cremers, Armin B.},
  title     = {DIN SPEC 92001-3 Artificial Intelligence - Life Cycle Processes and Quality Requirements - Part 3: Explainability},
  publisher = {Beuth Verlag},
  pages     = {24},
  year      = {2023},
  abstract  = {DIN SPEC 92001-3 Artificial Intelligence - Life Cycle Processes and Quality Requirements. Artificial Intelligence has become a game-changer, but its impact must be approached responsibly. This third document in the series aims to ensure that AI systems are developed, deployed, and used efficiently, responsibly, and in a trustworthy way. It focuses on "Explainability" - the ability to understand how AI makes decisions. DIN SPEC 92001-3 provides a domain-independent guide to promoting explainability throughout the AI system's life cycle. It tackles "opacity", describing sources and effects of opacity in AI and how explanations can mitigate them. The goal is to foster a better understanding and effective use of Explainable AI across various applications, strengthening principles of trust and transparency in AI-based systems. This DIN SPEC is a result of the project "Zertifizierung von Standard-KI-Anwendungen" (Zertifizierte KI), https://www.zertifizierte-ki.de/. The publication is available free of charge from Beuth Verlag GmbH (www.beuth.de); search for "DIN SPEC 92001-3".},
  language  = {en}
}