@unpublished{RoessleXieMohanetal.2026,
  author    = {R{\"o}ßle, Dominik and Xie, Xujun and Mohan, Adithya and Thirugnana Sambandham, Venkatesh and Cremers, Daniel and Sch{\"o}n, Torsten},
  title     = {DrivIng: A Large-Scale Multimodal Driving Dataset with Full Digital Twin Integration},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {10.48550/arXiv.2601.15260},
  year      = {2026},
  language  = {en}
}

@article{MohanSchoen2026,
  author    = {Mohan, Adithya and Sch{\"o}n, Torsten},
  title     = {Toward Robust Agents: A Survey of Adversarial Attacks and Defenses in Deep Reinforcement Learning},
  volume    = {14},
  journal   = {IEEE Access},
  publisher = {IEEE},
  address   = {New York},
  issn      = {2169-3536},
  doi       = {10.1109/ACCESS.2026.3657855},
  pages     = {14481--14497},
  year      = {2026},
  abstract  = {Deep Reinforcement Learning (DRL) has demonstrated remarkable success in autonomous decision-making across diverse domains, including robotics, autonomous driving, and game playing. However, recent studies have uncovered a critical vulnerability: DRL agents are highly susceptible to adversarial attacks that can significantly degrade their performance or lead to catastrophic failure. These attacks exploit different components of the learning pipeline (observations, actions, rewards, and policies), exposing new challenges unique to DRL compared to supervised learning. This survey provides a comprehensive examination of adversarial threats and corresponding defense mechanisms within the DRL paradigm. It also aims to serve as a foundational reference for researchers and practitioners seeking to understand and mitigate adversarial vulnerabilities in DRL.},
  language  = {en}
}

@unpublished{ChandraSekaranGeislerRoessleetal.2025,
  author    = {Chandra Sekaran, Karthikeyan and Geisler, Markus and R{\"o}ßle, Dominik and Mohan, Adithya and Cremers, Daniel and Utschick, Wolfgang and Botsch, Michael and Huber, Werner and Sch{\"o}n, Torsten},
  title     = {UrbanIng-V2X: A Large-Scale Multi-Vehicle, Multi-Infrastructure Dataset Across Multiple Intersections for Cooperative Perception},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {10.48550/arXiv.2510.23478},
  year      = {2025},
  language  = {en}
}

@unpublished{MohanRoessleCremersetal.2025,
  author    = {Mohan, Adithya and R{\"o}ßle, Dominik and Cremers, Daniel and Sch{\"o}n, Torsten},
  title     = {Advancing Robustness in Deep Reinforcement Learning with an Ensemble Defense Approach},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {10.48550/arXiv.2507.17070},
  year      = {2025},
  language  = {en}
}

@inproceedings{KarpenahalliRamakrishnaMohanZeinalyetal.2025,
  author    = {Karpenahalli Ramakrishna, Chidvilas and Mohan, Adithya and Zeinaly, Zahra and Belzner, Lenz},
  title     = {The Evolution of Criticality in Deep Reinforcement Learning},
  booktitle = {Proceedings of the 17th International Conference on Agents and Artificial Intelligence (ICAART 2025) - Volume 3},
  editor    = {Rocha, Ana Paula and Steels, Luc and van den Herik, Jaap},
  publisher = {SciTePress},
  address   = {Set{\'u}bal},
  isbn      = {978-989-758-737-5},
  doi       = {10.5220/0013114200003890},
  pages     = {217--224},
  year      = {2025},
  abstract  = {In Reinforcement Learning (RL), certain states demand special attention due to their significant influence on outcomes; these are identified as critical states. The concept of criticality is essential for the development of effective and robust policies and for improving overall trust in RL agents in real-world applications such as autonomous driving.
The current paper takes a deep dive into criticality and studies its evolution throughout training. The experiments are conducted on a new, simple yet intuitive continuous cliff maze environment and the Highway-env autonomous driving environment. A novel finding is reported: criticality is not only learnt by the agent but can also be unlearned. We hypothesize that diversity in experiences is necessary for effective criticality quantification, which is largely driven by the chosen exploration strategy. This close relationship between exploration and criticality is studied using two different strategies, namely exponential ε-decay and adaptive ε-decay. The study supports the idea that effective exploration plays a crucial role in accurately identifying and understanding critical states.},
  language  = {en}
}