@inproceedings{HashemiFarzanehKugeleKnoll2017,
  author    = {Hashemi Farzaneh, Morteza and Kugele, Stefan and Knoll, Alois},
  title     = {A graphical modeling tool supporting automated schedule synthesis for time-sensitive networking},
  booktitle = {2017 22nd IEEE International Conference on Emerging Technologies and Factory Automation (ETFA)},
  publisher = {IEEE},
  address   = {Piscataway},
  isbn      = {978-1-5090-6505-9},
  issn      = {1946-0759},
  doi       = {10.1109/ETFA.2017.8247599},
  year      = {2017},
  language  = {en}
}

@inproceedings{KugeleCebotariGleirscheretal.2017,
  author    = {Kugele, Stefan and Cebotari, Vadim and Gleirscher, Mario and Hashemi Farzaneh, Morteza and Segler, Christoph and Shafaei, Sina and V{\"o}gel, Hans-J{\"o}rg and Bauer, Fridolin and Knoll, Alois and Marmsoler, Diego and Michel, Hans-Ulrich},
  title     = {Research Challenges for a Future-Proof E/E Architecture: A Project Statement},
  booktitle = {INFORMATIK 2017},
  editor    = {Eibl, Maximilian and Gaedke, Martin},
  publisher = {Gesellschaft f{\"u}r Informatik},
  address   = {Bonn},
  isbn      = {978-3-88579-669-5},
  doi       = {10.18420/in2017_146},
  pages     = {1463--1474},
  year      = {2017},
  language  = {en}
}

@article{ZhouSongChenetal.2023,
  author    = {Zhou, Liguo and Song, Rui and Chen, Guang and Festag, Andreas and Knoll, Alois},
  title     = {Residual encoding framework to compress DNN parameters for fast transfer},
  journal   = {Knowledge-Based Systems},
  volume    = {277},
  pages     = {110815},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {1872-7409},
  doi       = {10.1016/j.knosys.2023.110815},
  year      = {2023},
  language  = {en}
}

@inproceedings{RothmeierHuberKnoll2024,
  author    = {Rothmeier, Thomas and Huber, Werner and Knoll, Alois},
  title     = {Time to Shine: Fine-Tuning Object Detection Models with Synthetic Adverse Weather Images},
  booktitle = {Proceedings of the 2024 IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)},
  publisher = {IEEE},
  address   = {Los Alamitos},
  isbn      = {979-8-3503-1892-0},
  doi       = {10.1109/WACV57701.2024.00439},
  pages     = {4435--4444},
  year      = {2024},
  language  = {en}
}

@inproceedings{SongLyuJiangetal.2023,
  author    = {Song, Rui and Lyu, Lingjuan and Jiang, Wei and Festag, Andreas and Knoll, Alois},
  title     = {V2X-Boosted Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection},
  booktitle = {CoPerception: Collaborative Perception and Learning, ICRA 2023 Workshop},
  url       = {https://drive.google.com/file/d/1-OQAeNryd7-lksCTuWnhhyydpFSnw5On/view?usp=share_link},
  year      = {2023},
  language  = {en}
}

@unpublished{SongLiangCaoetal.2024,
  author    = {Song, Rui and Liang, Chenwei and Cao, Hu and Yan, Zhiran and Zimmer, Walter and Gross, Markus and Festag, Andreas and Knoll, Alois},
  title     = {Collaborative Semantic Occupancy Prediction with Hybrid Feature Fusion in Connected Automated Vehicles},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {10.48550/arXiv.2402.07635},
  year      = {2024},
  abstract  = {Collaborative perception in automated vehicles leverages the exchange of information between agents, aiming to elevate perception results. Previous camera-based collaborative 3D perception methods typically employ 3D bounding boxes or bird's eye views as representations of the environment. However, these approaches fall short of offering a comprehensive 3D environmental prediction. To bridge this gap, we introduce the first method for collaborative 3D semantic occupancy prediction.
In particular, it improves local 3D semantic occupancy predictions through a hybrid fusion of (i) semantic and occupancy task features and (ii) compressed orthogonal attention features shared between vehicles. Additionally, because no existing collaborative perception dataset is designed for semantic occupancy prediction, we augment a current collaborative perception dataset with 3D collaborative semantic occupancy labels for a more robust evaluation. The experimental findings highlight that (i) our collaborative semantic occupancy predictions outperform single-vehicle results by over 30\%, and (ii) models anchored on semantic occupancy outpace state-of-the-art collaborative 3D detection techniques in subsequent perception applications, showcasing enhanced accuracy and enriched semantic awareness in road environments.},
  language  = {en}
}

@article{SongXuFestagetal.2023,
  author    = {Song, Rui and Xu, Runsheng and Festag, Andreas and Ma, Jiaqi and Knoll, Alois},
  title     = {FedBEVT: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems},
  journal   = {IEEE Transactions on Intelligent Vehicles},
  volume    = {9},
  number    = {1},
  publisher = {IEEE},
  address   = {New York},
  issn      = {2379-8904},
  doi       = {10.1109/TIV.2023.3310674},
  pages     = {958--969},
  year      = {2023},
  abstract  = {Bird's eye view (BEV) perception is becoming increasingly important in the field of autonomous driving. It uses multi-view camera data to learn a transformer model that directly projects the perception of the road environment onto the BEV perspective. However, training a transformer model often requires a large amount of data, and as camera data for road traffic are often private, they are typically not shared. Federated learning offers a solution that enables clients to collaboratively train models by exchanging model parameters rather than raw data. In this paper, we introduce FedBEVT, a federated transformer learning approach for BEV perception. To address two common data heterogeneity issues in FedBEVT, (i) diverse sensor poses and (ii) varying sensor numbers in perception systems, we propose two approaches: Federated Learning with Camera-Attentive Personalization (FedCaP) and Adaptive Multi-Camera Masking (AMCM), respectively. To evaluate our method in real-world settings, we create a dataset consisting of four typical federated use cases. Our findings suggest that FedBEVT outperforms the baseline approaches in all four use cases, demonstrating the potential of our approach for improving BEV perception in autonomous driving.},
  language  = {en}
}

@unpublished{SongLyuJiangetal.2023a,
  author    = {Song, Rui and Lyu, Lingjuan and Jiang, Wei and Festag, Andreas and Knoll, Alois},
  title     = {V2X-Boosted Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {10.48550/arXiv.2305.11654},
  year      = {2023},
  abstract  = {Machine learning (ML) has revolutionized transportation systems, enabling autonomous driving and smart traffic services. Federated learning (FL) overcomes privacy constraints by training ML models in distributed systems, exchanging model parameters instead of raw data. However, the dynamic states of connected vehicles affect the network connection quality and influence the FL performance. To tackle this challenge, we propose a contextual client selection pipeline that uses Vehicle-to-Everything (V2X) messages to select clients based on the predicted communication latency.
The pipeline includes (i) fusing V2X messages, (ii) predicting future traffic topology, (iii) pre-clustering clients based on local data distribution similarity, and (iv) selecting clients with minimal latency for future model aggregation. Experiments show that our pipeline outperforms baselines on various datasets, particularly in non-IID settings.},
  language  = {en}
}

@inproceedings{NairShafaeiKugeleetal.2019,
  author    = {Nair, Saasha and Shafaei, Sina and Kugele, Stefan and Osman, Mohd Hafeez and Knoll, Alois},
  title     = {Monitoring Safety of Autonomous Vehicles with Crash Prediction Network},
  booktitle = {Proceedings of the AAAI Workshop on Artificial Intelligence Safety 2019, co-located with the Thirty-Third AAAI Conference on Artificial Intelligence (AAAI 2019)},
  editor    = {Espinoza, Hu{\'a}scar and {\'O} h{\'E}igeartaigh, Se{\'a}n S. and Huang, Xiaowei and Hern{\'a}ndez-Orallo, Jos{\'e} and Castillo-Effen, Mauricio},
  publisher = {RWTH Aachen},
  address   = {Aachen},
  url       = {https://ceur-ws.org/Vol-2301/},
  year      = {2019},
  language  = {en}
}