@inproceedings{PratherLeinonenKiesleretal.2024,
  author    = {Prather, James and Leinonen, Juho and Kiesler, Natalie and Benario, Jamie Gorson and Lau, Sam and MacNeil, Stephen and Norouzi, Narges and Opel, Simone and Pettit, Virginia and Porter, Leo and Reeves, Brent N. and Savelka, Jaromir and Smith, David H. and Strickroth, Sven and Zingaro, Daniel},
  title     = {How Instructors Incorporate Generative AI into Teaching Computing},
  series    = {Proceedings of the 2024 on Innovation and Technology in Computer Science Education Vol. 2},
  booktitle = {Proceedings of the 2024 on Innovation and Technology in Computer Science Education Vol. 2},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3649405.3659534},
  pages     = {771--772},
  year      = {2024},
  abstract  = {Generative AI (GenAI) has seen great advancements in the past two years, and the conversation around adoption is increasing. Widely available GenAI tools are disrupting classroom practices as they can write and explain code with minimal student prompting. While most acknowledge that there is no way to stop students from using such tools, a consensus has yet to form on how students should use them if they choose to do so. At the same time, researchers have begun to introduce new pedagogical tools that integrate GenAI into computing curricula. These new tools offer students personalized help or attempt to teach prompting skills without undercutting code comprehension. This working group aims to detail the current landscape of education-focused GenAI tools and teaching approaches, present gaps where new tools or approaches could appear, identify good practice examples, and provide a guide for instructors to utilize GenAI as they continue to adapt to this new era.},
  language  = {en}
}

@inproceedings{AzaizKieslerStrickroth2024,
  author    = {Azaiz, Imen and Kiesler, Natalie and Strickroth, Sven},
  title     = {Feedback-Generation for Programming Exercises With GPT-4},
  series    = {Proceedings of the 2024 on Innovation and Technology in Computer Science Education Vol. 1},
  booktitle = {Proceedings of the 2024 on Innovation and Technology in Computer Science Education Vol. 1},
  publisher = {ACM},
  address   = {New York, NY, USA},
  doi       = {10.1145/3649217.3653594},
  pages     = {31--37},
  year      = {2024},
  abstract  = {Ever since Large Language Models (LLMs) and related applications have become broadly available, several studies have investigated their potential for assisting educators and supporting students in higher education. LLMs such as Codex, GPT-3.5, and GPT-4 have shown promising results in the context of large programming courses, where students can benefit from feedback and hints if provided in a timely manner and at scale. This paper explores the quality of GPT-4 Turbo's generated output for prompts containing both the programming task specification and a student's submission as input. Two assignments from an introductory programming course were selected, and GPT-4 was asked to generate feedback for 55 randomly chosen, authentic student programming submissions. The output was qualitatively analyzed regarding correctness, personalization, fault localization, and other features identified in the material. Compared to prior work and analyses of GPT-3.5, GPT-4 Turbo shows notable improvements. For example, the output is more structured and consistent. GPT-4 Turbo can also accurately identify invalid casing in student programs' output. In some cases, the feedback also includes the output of the student program.
At the same time, inconsistent feedback was noted, such as stating that the submission is correct but an error needs to be fixed. The present work increases our understanding of LLMs' potential, limitations, and how to integrate them into e-assessment systems, pedagogical scenarios, and the instruction of students who are using applications based on GPT-4.},
  language  = {en}
}

@inproceedings{KieslerRoepkeSchiffneretal.2024,
  author    = {Kiesler, Natalie and R{\"o}pke, Ren{\'e} and Schiffner, Daniel and Schulz, Sandra and Strickroth, Sven and Ehlenz, Matthias and Heinemann, Birte and Wilhelm-Weidner, Arno},
  title     = {Towards Open Science at the DELFI Conference},
  series    = {22. Fachtagung Bildungstechnologien (DELFI)},
  booktitle = {22. Fachtagung Bildungstechnologien (DELFI)},
  editor    = {Schulz, Sandra and Kiesler, Natalie},
  doi       = {10.18420/delfi2024_22},
  pages     = {251--265},
  year      = {2024},
  abstract  = {Despite the increasing awareness of Open Science within the educational technology community, conferences such as DELFI do not yet foster the publication of research data, including software. To address this, we conducted a survey eliciting the community's needs, perspectives, and publication preferences. The analysis of 24 valid responses reveals a variety of research data formats in use, as well as several uncertainties, e.g., regarding data ownership. Associated barriers comprise legal concerns and a lack of resources to publish data. Nonetheless, researchers seem open to new publication formats. Moreover, we analyzed authors' intentions to publish data related to their DELFI submissions in 2023 (n=66). Many researchers assume they have no data to share (n=28) or have no intention to publish data in the future (n=16). Overall, the results imply a lack of awareness and recognition of data publications, so further efforts and incentives are required to move toward Open Science practices in the DELFI community.},
  language  = {en}
}