@unpublished{SongLiuChenetal.2022, author = {Song, Rui and Liu, Dai and Chen, Dave Zhenyu and Festag, Andreas and Trinitis, Carsten and Schulz, Martin and Knoll, Alois}, title = {Federated Learning via Decentralized Dataset Distillation in Resource Constrained Edge Environments}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2208.11311}, year = {2022}, abstract = {In federated learning, all networked clients contribute to the model training cooperatively. However, with model sizes increasing, even sharing the trained partial models often leads to severe communication bottlenecks in underlying networks, especially when communicated iteratively. In this paper, we introduce a federated learning framework FedD3 requiring only one-shot communication by integrating dataset distillation instances. Instead of sharing model updates in other federated learning approaches, FedD3 allows the connected clients to distill the local datasets independently, and then aggregates those decentralized distilled datasets (e.g. a few unrecognizable images) from networks for model training. Our experimental results show that FedD3 significantly outperforms other federated learning frameworks in terms of needed communication volumes, while it provides the additional benefit to be able to balance the trade-off between accuracy and communication cost, depending on usage scenario or target dataset. For instance, for training an AlexNet model on CIFAR-10 with 10 clients under non-independent and identically distributed (Non-IID) setting, FedD3 can either increase the accuracy by over 71\% with a similar communication volume, or save 98\% of communication volume, while reaching the same accuracy, compared to other one-shot federated learning approaches.}, language = {en} } @unpublished{SongLiangXiaetal.2025, author = {Song, Rui and Liang, Chenwei and Xia, Yan and Zimmer, Walter and Cao, Hu and Caesar, Holger and Festag, Andreas and Knoll, Alois}, title = {CoDa-4DGS: Dynamic Gaussian Splatting with Context and Deformation Awareness for Autonomous Driving}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2503.06744}, year = {2025}, abstract = {Dynamic scene rendering opens new avenues in autonomous driving by enabling closed-loop simulations with photorealistic data, which is crucial for validating end-to-end algorithms. However, the complex and highly dynamic nature of traffic environments presents significant challenges in accurately rendering these scenes. In this paper, we introduce a novel 4D Gaussian Splatting (4DGS) approach, which incorporates context and temporal deformation awareness to improve dynamic scene rendering. Specifically, we employ a 2D semantic segmentation foundation model to self-supervise the 4D semantic features of Gaussians, ensuring meaningful contextual embedding. Simultaneously, we track the temporal deformation of each Gaussian across adjacent frames. By aggregating and encoding both semantic and temporal deformation features, each Gaussian is equipped with cues for potential deformation compensation within 3D space, facilitating a more precise representation of dynamic scenes. Experimental results show that our method improves 4DGS's ability to capture fine details in dynamic scene rendering for autonomous driving and outperforms other self-supervised methods in 4D reconstruction and novel view synthesis. 
Furthermore, CoDa-4DGS deforms semantic features with each Gaussian, enabling broader applications.}, language = {en} } @unpublished{HanXuKefferpuetzetal.2024, author = {Han, Longfei and Xu, Qiuyu and Kefferp{\"u}tz, Klaus and Elger, Gordon and Beyerer, J{\"u}rgen}, title = {Applying Extended Object Tracking for Self-Localization of Roadside Radar Sensors}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2407.03084}, year = {2024}, abstract = {Intelligent Transportation Systems (ITS) can benefit from roadside 4D mmWave radar sensors for large-scale traffic monitoring due to their weatherproof functionality, long sensing range and low manufacturing cost. However, the localization method using external measurement devices has limitations in urban environments. Furthermore, if the sensor mount shifts due to environmental influences, such changes cannot be corrected when the measurement is performed only during installation. In this paper, we propose self-localization of roadside radar data using Extended Object Tracking (EOT). The method analyses both the tracked trajectories of the vehicles observed by the sensor and the aerial laser scan of city streets, assigns labels of driving behaviors such as "straight ahead", "left turn", "right turn" to trajectory sections and road segments, and performs the Semantic Iterative Closest Points (SICP) algorithm to register the point cloud. The method exploits the result from a downstream task -- object tracking -- for localization. We demonstrate high accuracy in the sub-meter range along with very low orientation error. The method also shows good data efficiency. The evaluation is done in both simulation and real-world tests.}, language = {en} } @unpublished{HanKefferpuetzBeyerer2025, author = {Han, Longfei and Kefferp{\"u}tz, Klaus and Beyerer, J{\"u}rgen}, title = {3D Extended Object Tracking based on Extruded B-Spline Side View Profiles}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2503.10730}, year = {2025}, abstract = {Object tracking is an essential task for autonomous systems. With the advancement of 3D sensors, these systems can better perceive their surroundings using effective 3D Extended Object Tracking (EOT) methods. Based on the observation that common road users are symmetrical on the right and left sides in the traveling direction, we focus on the side view profile of the object. In order to leverage the developments in 2D EOT and balance the number of parameters of a shape model in the tracking algorithms, we propose a method for 3D extended object tracking (EOT) by describing the side view profile of the object with B-spline curves and forming an extrusion to obtain a 3D extent. The use of B-spline curves exploits their flexible representation power by allowing the control points to move freely. The algorithm is developed into an Extended Kalman Filter (EKF).
For a thorough evaluation of this method, we use simulated traffic scenarios of different vehicle models and a real-world open dataset containing both radar and lidar data.}, language = {en} } @inproceedings{SongFestagJagtapetal.2024, author = {Song, Rui and Festag, Andreas and Jagtap, Abhishek Dinkar and Bialdyga, Maximilian and Yan, Zhiran and Otte, Maximilian and Sadashivaiah, Sanath Tiptur and Knoll, Alois}, title = {First Mile: An Open Innovation Lab for Infrastructure-Assisted Cooperative Intelligent Transportation Systems}, booktitle = {2024 IEEE Intelligent Vehicles Symposium (IV)}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3503-4881-1}, doi = {https://doi.org/10.1109/IV55156.2024.10588500}, pages = {1635 -- 1642}, year = {2024}, language = {en} } @inproceedings{HegdeLoboFestag2022, author = {Hegde, Anupama and Lobo, Silas and Festag, Andreas}, title = {Cellular-V2X for Vulnerable Road User Protection in Cooperative ITS}, booktitle = {2022 18th International Conference on Wireless and Mobile Computing, Networking and Communications (WiMob)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-6975-3}, doi = {https://doi.org/10.1109/WiMob55322.2022.9941707}, pages = {118 -- 123}, year = {2022}, language = {en} } @inproceedings{SongLiuChenetal.2023, author = {Song, Rui and Liu, Dai and Chen, Dave Zhenyu and Festag, Andreas and Trinitis, Carsten and Schulz, Martin and Knoll, Alois}, title = {Federated Learning via Decentralized Dataset Distillation in Resource-Constrained Edge Environments}, booktitle = {IJCNN 2023 Conference Proceedings}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-8867-9}, doi = {https://doi.org/10.1109/IJCNN54540.2023.10191879}, year = {2023}, language = {en} } @inproceedings{MeessGernerHeinetal.2022, author = {Meess, Henri and Gerner, Jeremias and Hein, Daniel and Schmidtner, Stefanie and Elger, Gordon}, title = {Reinforcement Learning for Traffic Signal Control Optimization: A Concept for Real-World Implementation}, booktitle = {AAMAS '22: Proceedings of the 21st International Conference on Autonomous Agents and Multiagent Systems}, publisher = {International Foundation for Autonomous Agents and Multiagent Systems}, address = {Richland}, isbn = {978-1-4503-9213-6}, doi = {https://dl.acm.org/doi/10.5555/3535850.3536081}, pages = {1699 -- 1701}, year = {2022}, language = {en} } @inproceedings{FritzscheFestag2018, author = {Fritzsche, Richard and Festag, Andreas}, title = {Reliability Maximization with Location-Based Scheduling for Cellular-V2X Communications in Highway Scenarios}, booktitle = {2018 16th International Conference on Intelligent Transportation Systems Telecommunications (ITST) Proceedings}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-5386-5544-3}, doi = {https://doi.org/10.1109/ITST.2018.8566935}, year = {2018}, language = {en} } @inproceedings{AgrawalSongDoychevaetal.2023, author = {Agrawal, Shiva and Song, Rui and Doycheva, Kristina and Knoll, Alois and Elger, Gordon}, title = {Intelligent Roadside Infrastructure for Connected Mobility}, booktitle = {Smart Cities, Green Technologies, and Intelligent Transport Systems: 11th International Conference, SMARTGREENS 2022 and 8th International Conference, VEHITS 2022: Revised Selected Papers}, editor = {Klein, Cornel and Jarke, Matthias and Ploeg, Jeroen and Helfert, Markus and Berns, Karsten and Gusikhin, Oleg}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-37470-8}, issn = {1865-0937}, doi =
{https://doi.org/10.1007/978-3-031-37470-8_6}, pages = {134 -- 157}, year = {2023}, language = {en} } @inproceedings{AgrawalBhanderiAmanagietal.2023, author = {Agrawal, Shiva and Bhanderi, Savankumar and Amanagi, Sumit and Doycheva, Kristina and Elger, Gordon}, title = {Instance Segmentation and Detection of Children to Safeguard Vulnerable Traffic User by Infrastructure}, booktitle = {Proceedings of the 9th International Conference on Vehicle Technology and Intelligent Transport Systems}, editor = {Vinel, Alexey and Ploeg, Jeroen and Berns, Karsten and Gusikhin, Oleg}, publisher = {SciTePress}, address = {Set{\´u}bal}, isbn = {978-989-758-652-1}, issn = {2184-495X}, doi = {https://doi.org/10.5220/0011825400003479}, pages = {206 -- 214}, year = {2023}, abstract = {Cameras mounted on intelligent roadside infrastructure units and vehicles can detect humans on the road using state-of-the-art perception algorithms, but these algorithms are presently not trained to distinguish between a child and an adult. However, this is a crucial requirement from a safety perspective because a child may not follow all the traffic rules, particularly while crossing the road. Moreover, a child may stop or may start playing on the road. In such situations, the separation of a child from an adult is necessary. The work in this paper aims to solve this problem by applying a transfer-learning-based neural network approach to classify children and adults separately in camera images. The described work is comprised of image data collection, data annotation, transfer learning-based model development, and evaluation. For the work, Mask-RCNN (region-based convolutional neural network) with different backbone architectures and two different baselines are investigated and the perception precision of the architectures after transfer-learning is compared. The results reveal that the best performing trained model is able to detect and classify children and adults separately in different road scenarios with segmentation mask AP (average precision) of 85\% and bounding box AP of 92\%.}, language = {en} } @article{AgrawalBhanderiElger2025, author = {Agrawal, Shiva and Bhanderi, Savankumar and Elger, Gordon}, title = {Infra-3DRC-FusionNet: Deep Fusion of Roadside Mounted RGB Mono Camera and Three-Dimensional Automotive Radar for Traffic User Detection}, volume = {25}, pages = {3422}, journal = {Sensors}, number = {11}, publisher = {MDPI}, address = {Basel}, issn = {1424-8220}, doi = {https://doi.org/10.3390/s25113422}, year = {2025}, abstract = {Mono RGB cameras and automotive radar sensors provide a complementary information set that makes them excellent candidates for sensor data fusion to obtain robust traffic user detection. This has been widely used in the vehicle domain and recently introduced in roadside-mounted smart infrastructure-based road user detection. However, the performance of the most commonly used late fusion methods often degrades when the camera fails to detect road users in adverse environmental conditions. The solution is to fuse the data using deep neural networks at the early stage of the fusion pipeline to use the complete data provided by both sensors. Research has been carried out in this area, but it is limited to vehicle-based sensor setups. Hence, this work proposes a novel deep neural network to jointly fuse RGB mono-camera images and 3D automotive radar point cloud data to obtain enhanced traffic user detection for the roadside-mounted smart infrastructure setup.
Projected radar points are first used to generate anchors in image regions with a high likelihood of road users, including areas not visible to the camera. These anchors guide the prediction of 2D bounding boxes, object categories, and confidence scores. Valid detections are then used to segment radar points by instance, and the results are post-processed to produce final road user detections in the ground plane. The trained model is evaluated for different light and weather conditions using ground truth data from a lidar sensor. It provides a precision of 92\%, recall of 78\%, and F1-score of 85\%. The proposed deep fusion methodology has 33\%, 6\%, and 21\% absolute improvement in precision, recall, and F1-score, respectively, compared to object-level spatial fusion output.}, language = {en} } @article{DaRosaZanattaCarvalhoLustosadaCostaAntreichetal.2020, author = {Da Rosa Zanatta, Mateus and Carvalho Lustosa da Costa, Joao Paulo and Antreich, Felix and Haardt, Martin and Elger, Gordon and Lopes de Mendon{\c{c}}a, F{\´a}bio L{\´u}cio and de Sousa Junior, Rafael Tim{\´o}teo}, title = {Tensor-based framework with model order selection and high accuracy factor decomposition for time-delay estimation in dynamic multipath scenarios}, volume = {8}, journal = {IEEE Access}, publisher = {IEEE}, address = {New York}, issn = {2169-3536}, doi = {https://doi.org/10.1109/ACCESS.2020.3024597}, pages = {174931 -- 174942}, year = {2020}, abstract = {Global Navigation Satellite Systems (GNSS) are crucial for applications that demand very accurate positioning. Tensor-based time-delay estimation methods, such as CPD-GEVD, DoA/KRF, and SECSI, combined with the GPS3 L1C signal, are capable of, significantly, mitigating the positioning degradation caused by multipath components. However, even though these schemes require an estimated model order, they assume that the number of multipath components is constant. In GNSS applications, the number of multipath components is time-varying in dynamic scenarios. Thus, in this paper, we propose a tensor-based framework with model order selection and high accuracy factor decomposition for time-delay estimation in dynamic multipath scenarios. Our proposed approach exploits the estimates of the model order for each slice by grouping the data tensor slices into sub-tensors to provide high accuracy factor decomposition. We further enhance the proposed approach by incorporating the tensor-based Multiple Denoising (MuDe).}, language = {en} } @article{DeloozWilleckeGarlichsetal.2022, author = {Delooz, Quentin and Willecke, Alexander and Garlichs, Keno and Hagau, Andreas-Christian and Wolf, Lars and Vinel, Alexey and Festag, Andreas}, title = {Analysis and Evaluation of Information Redundancy Mitigation for V2X Collective Perception}, volume = {10}, journal = {IEEE Access}, publisher = {IEEE}, address = {New York}, issn = {2169-3536}, doi = {https://doi.org/10.1109/ACCESS.2022.3170029}, pages = {47076 -- 47093}, year = {2022}, abstract = {Sensor data sharing enables vehicles to exchange locally perceived sensor data among each other and with the roadside infrastructure to increase their environmental awareness. It is commonly regarded as a next-generation vehicular communication service beyond the exchange of highly aggregated messages in the first generation. 
The approach is being considered in the European standardization process, where it relies on the exchange of locally detected objects representing anything safety-relevant, such as other vehicles or pedestrians, in periodically broadcasted messages to vehicles in direct communication range. Object filtering methods for inclusion in a message are necessary to avoid overloading a channel and provoking unnecessary data processing. Initial studies provided in a pre-standardization report about sensor data sharing elaborated a first set of rules to filter objects based on their characteristics, such as their dynamics or type. However, these rules do not yet take into account information received from other stations. Specifically, to address the problem of information redundancy, several rules have been proposed, but their performance has not yet been comprehensively evaluated. In the present work, the rules are further analyzed, assessed, and compared. Functional and operational requirements are investigated. A performance evaluation is realized by discrete-event simulations in a scenario for a representative city with realistic vehicle densities and mobility patterns. A score and other redundancy-level metrics are elaborated to ease the evaluation and comparison of the filtering rules. Finally, improvements and future work on the filtering methods are proposed.}, language = {en} } @article{SongZhouLyuetal.2023, author = {Song, Rui and Zhou, Liguo and Lyu, Lingjuan and Festag, Andreas and Knoll, Alois}, title = {ResFed: Communication-Efficient Federated Learning With Deep Compressed Residuals}, volume = {11}, journal = {IEEE Internet of Things Journal}, number = {6}, publisher = {IEEE}, address = {New York}, issn = {2327-4662}, doi = {https://doi.org/10.1109/JIOT.2023.3324079}, pages = {9458 -- 9472}, year = {2023}, abstract = {Federated learning allows for cooperative training among distributed clients by sharing their locally learned model parameters, such as weights or gradients. However, as model size increases, the communication bandwidth required for deployment in wireless networks becomes a bottleneck. To address this, we propose a residual-based federated learning framework (ResFed) that transmits residuals instead of gradients or weights in networks. By predicting model updates at both clients and the server, residuals are calculated as the difference between updated and predicted models and contain more dense information than weights or gradients. We find that the residuals are less sensitive to an increasing compression ratio than other parameters, and hence use lossy compression techniques on residuals to improve communication efficiency for training in federated settings.
With the same compression ratio, ResFed outperforms current methods (weight- or gradient-based federated learning) by over 1.4× on federated data sets, including MNIST, FashionMNIST, SVHN, CIFAR-10, CIFAR-100, and FEMNIST, in client-to-server communication, and can also be applied to reduce communication costs for server-to-client communication.}, language = {en} } @article{HegdeSongFestag2023, author = {Hegde, Anupama and Song, Rui and Festag, Andreas}, title = {Radio Resource Allocation in 5G-NR V2X: A Multi-Agent Actor-Critic Based Approach}, volume = {11}, journal = {IEEE Access}, publisher = {IEEE}, address = {New York}, issn = {2169-3536}, doi = {https://doi.org/10.1109/ACCESS.2023.3305267}, pages = {87225 -- 87244}, year = {2023}, abstract = {The efficiency of radio resource allocation and scheduling procedures in Cellular Vehicle-to-X (Cellular V2X) communication networks directly affects link quality in terms of latency and reliability. However, owing to the continuous movement of vehicles, it is impossible to have a centralized coordinating unit at all times to manage the allocation of radio resources. In the unmanaged mode of the fifth generation new radio (5G-NR) V2X, the sensing-based semi-persistent scheduling (SB-SPS) loses its effectiveness when V2X data messages become aperiodic with varying data sizes. This leads to misinformed resource allocation decisions among vehicles and frequent resource collisions. To improve resource selection, this study formulates the Cellular V2X communication network as a decentralized multi-agent networked Markov decision process (MDP) where each vehicle agent executes an actor-critic-based radio resource scheduler. Developing the actor-critic methodology further for the radio resource allocation problem in Cellular V2X, the study derives two variants: independent actor-critic (IAC) and shared experience actor-critic (SEAC). Results from simulation studies indicate that the actor-critic schedulers improve reliability, achieving a 15-20\% higher probability of reception under high vehicular density scenarios with aperiodic traffic patterns.}, language = {en} } @article{AgrawalBhanderiElger2024, author = {Agrawal, Shiva and Bhanderi, Savankumar and Elger, Gordon}, title = {Semi-Automatic Annotation of 3D Radar and Camera for Smart Infrastructure-Based Perception}, volume = {12}, journal = {IEEE Access}, publisher = {IEEE}, address = {New York}, issn = {2169-3536}, doi = {https://doi.org/10.1109/ACCESS.2024.3373310}, pages = {34325 -- 34341}, year = {2024}, abstract = {Environment perception using camera, radar, and/or lidar sensors has significantly improved in the last few years because of deep learning-based methods. However, a large group of these methods fall into the category of supervised learning, which requires a considerable amount of annotated data. Due to uncertainties in multi-sensor data, automating the data labeling process is extremely challenging; hence, it is performed manually to a large extent. Even though full automation of such a process is difficult, semiautomation can be a significant step to ease this process. However, the available work in this regard is still very limited; hence, in this paper, a novel semi-automatic annotation methodology is developed for labeling RGB camera images and 3D automotive radar point cloud data using a smart infrastructure-based sensor setup.
This paper also describes a new method for 3D radar background subtraction to remove clutter and a new object category, GROUP, for radar-based object detection for closely located vulnerable road users. To validate the work, a dataset named INFRA-3DRC is created using this methodology, where 75\% of the labels are automatically generated. In addition, a radar cluster classifier and an image classifier are developed, trained, and tested on this dataset, achieving accuracy of 98.26\% and 94.86\%, respectively. The dataset and Python scripts are available at https://fraunhoferivi.github.io/INFRA-3DRC-Dataset/.}, language = {en} } @unpublished{SongZhouLyuetal.2022, author = {Song, Rui and Zhou, Liguo and Lyu, Lingjuan and Festag, Andreas and Knoll, Alois}, title = {ResFed: Communication Efficient Federated Learning by Transmitting Deep Compressed Residuals}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2212.05602}, year = {2022}, language = {en} } @inproceedings{FestagUdupaGarciaetal.2021, author = {Festag, Andreas and Udupa, Shrivatsa and Garcia, Lourdes and Wellens, Ralf and Hecht, Matthias and Ulfig, Pierre}, title = {End-to-End Performance Measurements of Drone Communications in 5G Cellular Networks}, booktitle = {2021 IEEE 94th Vehicular Technology Conference (VTC2021-Fall): Proceedings}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-1368-8}, doi = {https://doi.org/10.1109/VTC2021-Fall52928.2021.9625429}, year = {2021}, language = {en} } @inproceedings{FestagSong2021, author = {Festag, Andreas and Song, Rui}, title = {Analysis of Existing Approaches for Information Sharing in Cooperative Intelligent Transport Systems}, booktitle = {FISITA World Congress 2021}, subtitle = {SENSORIS and V2X Messaging}, publisher = {FISITA}, address = {Bishops Stortford}, url = {https://www.fisita.com/library/f2020-acm-012}, year = {2021}, language = {en} } @inproceedings{ElgerMokhtariBhogarajuetal.2020, author = {Elger, Gordon and Mokhtari, Omid and Bhogaraju, Sri Krishna and Conti, Fosca and Meier, Markus and Schweigart, Helmut}, title = {Analyse der Reaktionsprodukte von Metall-Formiaten im r{\"u}ckstandfreien L{\"o}tprozess}, booktitle = {GMM-Fb. 
94: EBL 2020 - Elektronische Baugruppen und Leiterplatten}, publisher = {VDE Verlag}, address = {Berlin}, isbn = {978-3-8007-5185-3}, doi = {https://www.vde-verlag.de/proceedings-de/455185053.html}, year = {2020}, language = {de} } @inproceedings{SenelUdupaElger2021, author = {Senel, Numan and Udupa, Shrivatsa and Elger, Gordon}, title = {Sensor data preprocessing in road-side sensor units}, booktitle = {FISITA World Congress 2021}, publisher = {FISITA}, address = {Bishops Stortford}, url = {https://www.fisita.com/library/f2021-acm-120}, year = {2021}, language = {en} } @inproceedings{StreckSchmokSchneideretal.2021, author = {Streck, Egor and Schmok, Peter and Schneider, Klaus and Erdogan, H{\"u}seyin and Elger, Gordon}, title = {Safeguarding future autonomous traffic by infrastructure based on multi radar sensor systems}, booktitle = {FISITA World Congress 2021}, publisher = {FISITA}, address = {Bishops Stortford}, url = {https://www.fisita.com/library/f2021-acm-121}, year = {2021}, language = {en} } @inproceedings{MeessGernerHeinetal.2022, author = {Meess, Henri and Gerner, Jeremias and Hein, Daniel and Schmidtner, Stefanie and Elger, Gordon}, title = {Real World Traffic Optimization by Reinforcement Learning: A Concept}, booktitle = {International Workshop on Agent-Based Modelling of Urban Systems (ABMUS) Proceedings: 2022}, editor = {Kieu, Minh Le and Dam, Koen H. van and Thompson, Jason and Malleson, Nick and Heppenstall, Alison and Ge, Jiaqi}, publisher = {figshare}, address = {[s. l.]}, doi = {https://doi.org/10.6084/m9.figshare.19733800.v1}, pages = {49 -- 54}, year = {2022}, language = {en} } @inproceedings{ZippeliusStroblSchmidetal.2022, author = {Zippelius, Andreas and Strobl, Tobias and Schmid, Maximilian and Hermann, Joseph and Hoffmann, Alwin and Elger, Gordon}, title = {Predicting thermal resistance of solder joints based on Scanning Acoustic Microscopy using Artificial Neural Networks}, booktitle = {2022 IEEE 9th Electronics System-Integration Technology Conference (ESTC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-8947-8}, doi = {https://doi.org/10.1109/ESTC55720.2022.9939465}, pages = {566 -- 575}, year = {2022}, language = {en} } @inproceedings{SchmidHermannBhogarajuetal.2022, author = {Schmid, Maximilian and Hermann, Joseph and Bhogaraju, Sri Krishna and Elger, Gordon}, title = {Reliability of SAC Solders under Low and High Stress Conditions}, booktitle = {2022 IEEE 9th Electronics System-Integration Technology Conference (ESTC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-8947-8}, doi = {https://doi.org/10.1109/ESTC55720.2022.9939394}, pages = {553 -- 559}, year = {2022}, language = {en} } @inproceedings{SongHegdeSeneletal.2022, author = {Song, Rui and Hegde, Anupama and Senel, Numan and Knoll, Alois and Festag, Andreas}, title = {Edge-Aided Sensor Data Sharing in Vehicular Communication Networks}, booktitle = {2022 IEEE 95th Vehicular Technology Conference: (VTC2022-Spring) Proceedings}, publisher = {IEEE}, address = {Piscataway (NJ)}, isbn = {978-1-6654-8243-1}, issn = {2577-2465}, doi = {https://doi.org/10.1109/VTC2022-Spring54318.2022.9860849}, year = {2022}, language = {en} } @inproceedings{MaksimovskiFacchiFestag2021, author = {Maksimovski, Daniel and Facchi, Christian and Festag, Andreas}, title = {Priority Maneuver (PriMa) Coordination for Connected and Automated Vehicles}, booktitle = {2021 IEEE International Intelligent Transportation Systems Conference (ITSC)}, publisher = {IEEE}, address = {Piscataway}, isbn = 
{978-1-7281-9142-3}, doi = {https://doi.org/10.1109/ITSC48978.2021.9564923}, pages = {1083 -- 1089}, year = {2021}, language = {en} } @article{DeloozFestagVinel2021, author = {Delooz, Quentin and Festag, Andreas and Vinel, Alexey}, title = {Congestion Aware Objects Filtering for Collective Perception}, volume = {80}, journal = {Electronic Communications of the EASST}, publisher = {TU Berlin}, address = {Berlin}, issn = {1863-2122}, doi = {http://dx.doi.org/10.14279/tuj.eceasst.80.1160}, year = {2021}, abstract = {This paper addresses collective perception for connected and automated driving. It proposes the adaptation of filtering rules based on the currently available channel resources, referred to as Enhanced DCC-Aware Filtering (EDAF).}, language = {en} } @inproceedings{VolkDeloozSchieggetal.2021, author = {Volk, Georg and Delooz, Quentin and Schiegg, Florian and Bernuth, Alexander von and Festag, Andreas and Bringmann, Oliver}, title = {Towards Realistic Evaluation of Collective Perception for Connected and Automated Driving}, booktitle = {2021 IEEE International Intelligent Transportation Systems Conference (ITSC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-7281-9142-3}, doi = {https://doi.org/10.1109/ITSC48978.2021.9564783}, pages = {1049 -- 1056}, year = {2021}, language = {en} } @inproceedings{HegdeFestag2021, author = {Hegde, Anupama and Festag, Andreas}, title = {Mode Switching Performance in Cellular-V2X}, booktitle = {2020 IEEE Vehicular Networking Conference (VNC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-7281-9221-5}, doi = {https://doi.org/10.1109/VNC51378.2020.9318394}, year = {2021}, language = {en} } @inproceedings{DeloozRieblFestagetal.2021, author = {Delooz, Quentin and Riebl, Raphael and Festag, Andreas and Vinel, Alexey}, title = {Design and Performance of Congestion-Aware Collective Perception}, booktitle = {2020 IEEE Vehicular Networking Conference (VNC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-7281-9221-5}, doi = {https://doi.org/10.1109/VNC51378.2020.9318335}, year = {2021}, language = {en} } @inproceedings{SenelElgerFestag2020, author = {Senel, Numan and Elger, Gordon and Festag, Andreas}, title = {Sensor Time Synchronization in Smart Road Infrastructure}, booktitle = {FISITA Web Congress 2020}, publisher = {FISITA}, address = {Bishops Stortford}, url = {https://www.fisita.com/library/f2020-acm-083}, year = {2020}, language = {en} } @inproceedings{MaksimovskiFacchiFestag2022, author = {Maksimovski, Daniel and Facchi, Christian and Festag, Andreas}, title = {Cooperative Driving: Research on Generic Decentralized Maneuver Coordination for Connected and Automated Vehicles}, booktitle = {Smart Cities, Green Technologies, and Intelligent Transport Systems: 10th International Conference, SMARTGREENS 2021, and 7th International Conference, VEHITS 2021, Virtual Event, April 28-30, 2021, Revised Selected Papers}, editor = {Klein, Cornel and Jarke, Matthias and Helfert, Markus and Berns, Karsten and Gusikhin, Oleg}, publisher = {Springer}, address = {Cham}, isbn = {978-3-031-17097-3}, doi = {https://doi.org/10.1007/978-3-031-17098-0_18}, pages = {348 -- 370}, year = {2022}, language = {en} } @inproceedings{SongZhouLakshminarasimhanetal.2022, author = {Song, Rui and Zhou, Liguo and Lakshminarasimhan, Venkatnarayanan and Festag, Andreas and Knoll, Alois}, title = {Federated Learning Framework Coping with Hierarchical Heterogeneity in Cooperative ITS}, booktitle = {2022 IEEE 25th International Conference on Intelligent 
Transportation Systems (ITSC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-6880-0}, pages = {3502 -- 3508}, year = {2022}, language = {en} } @inproceedings{DeloozFestagVineletal.2023, author = {Delooz, Quentin and Festag, Andreas and Vinel, Alexey and Lobo, Silas}, title = {Simulation-Based Performance Optimization of V2X Collective Perception by Adaptive Object Filtering}, booktitle = {2023 IEEE Intelligent Vehicles Symposium (IV): Proceedings}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3503-4691-6}, doi = {https://doi.org/10.1109/IV55152.2023.10186788}, year = {2023}, language = {en} } @article{KettelgerdesElger2023, author = {Kettelgerdes, Marcel and Elger, Gordon}, title = {In-Field Measurement and Methodology for Modeling and Validation of Precipitation Effects on Solid-State LiDAR Sensors}, volume = {7}, journal = {IEEE Journal of Radio Frequency Identification}, publisher = {Institute of Electrical and Electronics Engineers (IEEE)}, address = {New York}, issn = {2469-7281}, doi = {https://doi.org/10.1109/JRFID.2023.3234999}, pages = {192 -- 202}, year = {2023}, language = {en} } @inproceedings{KettelgerdesElger2022, author = {Kettelgerdes, Marcel and Elger, Gordon}, title = {Modeling Methodology and In-field Measurement Setup to Develop Empiric Weather Models for Solid-State LiDAR Sensors}, booktitle = {2022 IEEE 2nd International Conference on Digital Twins and Parallel Intelligence (DTPI)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-9227-0}, doi = {https://doi.org/10.1109/DTPI55838.2022.9998918}, year = {2022}, language = {en} } @inproceedings{HegdeDeloozMariyakllaetal.2023, author = {Hegde, Anupama and Delooz, Quentin and Mariyaklla, Chethan L. and Festag, Andreas and Klingler, Florian}, title = {Radio Resource Allocation for Collective Perception in 5G-NR Vehicle-to-X Communication Systems}, booktitle = {2023 IEEE Wireless Communications and Networking Conference (WCNC): Proceedings}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-9122-8}, issn = {1558-2612}, doi = {https://doi.org/10.1109/WCNC55385.2023.10118606}, year = {2023}, language = {en} } @article{SenelKefferpuetzDoychevaetal.2023, author = {Senel, Numan and Kefferp{\"u}tz, Klaus and Doycheva, Kristina and Elger, Gordon}, title = {Multi-Sensor Data Fusion for Real-Time Multi-Object Tracking}, volume = {11}, pages = {501}, journal = {Processes}, number = {2}, publisher = {MDPI}, address = {Basel}, issn = {2227-9717}, doi = {https://doi.org/10.3390/pr11020501}, year = {2023}, abstract = {Sensor data fusion is essential for environmental perception within smart traffic applications. By using multiple sensors cooperatively, the accuracy and probability of the perception are increased, which is crucial for critical traffic scenarios or under bad weather conditions. In this paper, a modular real-time capable multi-sensor fusion framework is presented and tested to fuse data on the object list level from distributed automotive sensors (cameras, radar, and LiDAR). The modular multi-sensor fusion architecture receives an object list (untracked objects) from each sensor. The fusion framework combines classical data fusion algorithms, as it contains a coordinate transformation module, an object association module (Hungarian algorithm), an object tracking module (unscented Kalman filter), and a movement compensation module. Due to the modular design, the fusion framework is adaptable and does not rely on the number of sensors or their types. 
Moreover, owing to this adaptable design, the method continues to operate in case of an individual sensor failure. This is an essential feature for safety-critical applications. The architecture targets environmental perception in challenging time-critical applications. The developed fusion framework is tested using simulation and public domain experimental data. Using the developed framework, sensor fusion is performed in well under 10 milliseconds of computing time on an AMD Ryzen 7 5800H mobile processor using the Python programming language. Furthermore, the object-level multi-sensor approach enables the detection of changes in the extrinsic calibration of the sensors and potential sensor failures. A concept was developed to use the multi-sensor framework to identify sensor malfunctions. This feature will become extremely important in ensuring the functional safety of the sensors for autonomous driving.}, language = {en} } @inproceedings{HermannSchmidElger2022, author = {Hermann, Joseph and Schmid, Maximilian and Elger, Gordon}, title = {Crack Growth Prediction in High-Power LEDs from TTA, SAM and Simulated Data}, booktitle = {THERMINIC 2022: Proceedings 2022}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-9229-4}, doi = {https://doi.org/10.1109/THERMINIC57263.2022.9950673}, year = {2022}, language = {en} } @article{ZhouSongChenetal.2023, author = {Zhou, Liguo and Song, Rui and Chen, Guang and Festag, Andreas and Knoll, Alois}, title = {Residual encoding framework to compress DNN parameters for fast transfer}, volume = {2023}, pages = {110815}, journal = {Knowledge-Based Systems}, number = {277}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1872-7409}, doi = {https://doi.org/10.1016/j.knosys.2023.110815}, year = {2023}, language = {en} } @inproceedings{KettelgerdesMezmerHaeussleretal.2023, author = {Kettelgerdes, Marcel and Mezmer, Peter and Haeussler, Michael J.
and B{\"o}ttger, Gunnar and Tavakolibasti, Majid and Pandey, Amit and Erdogan, H{\"u}seyin and Elger, Gordon and Schacht, Ralph and Wunderle, Bernhard}, title = {Realization, multi-field coupled simulation and characterization of a thermo-mechanically robust LiDAR front end on a copper coated glass substrate}, booktitle = {Proceedings: IEEE 73rd Electronic Components and Technology Conference, ECTC 2023}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3503-3498-2}, issn = {2377-5726}, doi = {https://doi.org/10.1109/ECTC51909.2023.00131}, pages = {753 -- 760}, year = {2023}, language = {en} } @article{AgrawalBhanderiDoychevaetal.2023, author = {Agrawal, Shiva and Bhanderi, Savankumar and Doycheva, Kristina and Elger, Gordon}, title = {Static multi-target-based auto-calibration of RGB cameras, 3D Radar, and 3D Lidar sensors}, volume = {23}, journal = {IEEE Sensors Journal}, number = {18}, publisher = {IEEE}, address = {Piscataway}, issn = {1530-437X}, doi = {https://doi.org/10.1109/JSEN.2023.3300957}, pages = {21493 -- 21505}, year = {2023}, language = {en} } @unpublished{BazziSepulcreDeloozetal.2023, author = {Bazzi, Alessandro and Sepulcre, Miguel and Delooz, Quentin and Festag, Andreas and Vogt, Jonas and Wieker, Horst and Berens, Friedbert and Spaanderman, Paul}, title = {Multi-Channel Operation for the Release 2 of ETSI Cooperative Intelligent Transport Systems}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2305.19863}, year = {2023}, abstract = {Vehicles and road infrastructure are starting to be equipped with vehicle-to-everything (V2X) communication solutions to increase road safety and provide new services to drivers and passengers. In Europe, the deployment is based on a set of Release 1 standards developed by ETSI to support basic use cases for cooperative intelligent transport systems (C-ITS). For them, the capacity of a single 10 MHz channel in the ITS band at 5.9 GHz is considered sufficient. At the same time, the ITS stakeholders are working towards several advanced use cases, which imply a significant increment of data traffic and the need for multiple channels. To address this issue, ETSI has recently standardized a new multi-channel operation (MCO) concept for flexible, efficient, and future-proof use of multiple channels. This new concept is defined in a set of new specifications that represent the foundation for the future releases of C-ITS standards. The present paper provides a comprehensive review of the new set of specifications, describing the main entities extending the C-ITS architecture at the different layers of the protocol stack, In addition, the paper provides representative examples that describe how these MCO standards will be used in the future and discusses some of the main open issues arising. 
The review and analysis of this paper facilitate the understanding and motivation of the new set of Release 2 ETSI specifications for MCO and the identification of new research opportunities.}, language = {en} } @article{KettelgerdesSarmientoErdoganetal.2024, author = {Kettelgerdes, Marcel and Sarmiento, Nicolas and Erdogan, H{\"u}seyin and Wunderle, Bernhard and Elger, Gordon}, title = {Precise Adverse Weather Characterization by Deep-Learning-Based Noise Processing in Automotive LiDAR Sensors}, volume = {16}, pages = {2407}, journal = {Remote Sensing}, number = {13}, publisher = {MDPI}, address = {Basel}, issn = {2072-4292}, doi = {https://doi.org/10.3390/rs16132407}, year = {2024}, abstract = {With current advances in automated driving, optical sensors like cameras and LiDARs are playing an increasingly important role in modern driver assistance systems. However, these sensors face challenges from adverse weather effects like fog and precipitation, which significantly degrade the sensor performance due to scattering effects in its optical path. Consequently, major efforts are being made to understand, model, and mitigate these effects. In this work, the reverse research question is investigated, demonstrating that these measurement effects can be exploited to predict occurring weather conditions by using state-of-the-art deep learning mechanisms. In order to do so, a variety of models have been developed and trained on a recorded multiseason dataset and benchmarked with respect to performance, model size, and required computational resources, showing that especially modern vision transformers achieve remarkable results in distinguishing up to 15 precipitation classes with an accuracy of 84.41\% and predicting the corresponding precipitation rate with a mean absolute error of less than 0.47 mm/h, solely based on measurement noise. Therefore, this research may contribute to a cost-effective solution for characterizing precipitation with a commercial Flash LiDAR sensor, which can be implemented as a lightweight vehicle software feature to issue advanced driver warnings, adapt driving dynamics, or serve as a data quality measure for adaptive data preprocessing and fusion.}, language = {en} } @inproceedings{SongLyuJiangetal.2023, author = {Song, Rui and Lyu, Lingjuan and Jiang, Wei and Festag, Andreas and Knoll, Alois}, title = {V2X-Boosted Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection}, booktitle = {CoPerception: Collaborative Perception and Learning, ICRA 2023 workshop}, url = {https://drive.google.com/file/d/1-OQAeNryd7-lksCTuWnhhyydpFSnw5On/view?usp=share_link}, year = {2023}, language = {en} } @unpublished{SongLiangCaoetal.2024, author = {Song, Rui and Liang, Chenwei and Cao, Hu and Yan, Zhiran and Zimmer, Walter and Gross, Markus and Festag, Andreas and Knoll, Alois}, title = {Collaborative Semantic Occupancy Prediction with Hybrid Feature Fusion in Connected Automated Vehicles}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2402.07635}, year = {2024}, abstract = {Collaborative perception in automated vehicles leverages the exchange of information between agents, aiming to elevate perception results. Previous camera-based collaborative 3D perception methods typically employ 3D bounding boxes or bird's eye views as representations of the environment. However, these approaches fall short in offering a comprehensive 3D environmental prediction. 
To bridge this gap, we introduce the first method for collaborative 3D semantic occupancy prediction. Particularly, it improves local 3D semantic occupancy predictions by hybrid fusion of (i) semantic and occupancy task features, and (ii) compressed orthogonal attention features shared between vehicles. Additionally, due to the lack of a collaborative perception dataset designed for semantic occupancy prediction, we augment a current collaborative perception dataset to include 3D collaborative semantic occupancy labels for a more robust evaluation. The experimental findings highlight that: (i) our collaborative semantic occupancy predictions excel above the results from single vehicles by over 30\%, and (ii) models anchored on semantic occupancy outpace state-of-the-art collaborative 3D detection techniques in subsequent perception applications, showcasing enhanced accuracy and enriched semantic-awareness in road environments.}, language = {en} } @inproceedings{HanKefferpuetzElgeretal.2024, author = {Han, Longfei and Kefferp{\"u}tz, Klaus and Elger, Gordon and Beyerer, J{\"u}rgen}, title = {FlexSense: Flexible Infrastructure Sensors for Traffic Perception}, booktitle = {2023 IEEE 26th International Conference on Intelligent Transportation Systems (ITSC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3503-9946-2}, doi = {https://doi.org/10.1109/ITSC57777.2023.10422616}, pages = {3810 -- 3816}, year = {2024}, language = {en} } @article{SongXuFestagetal.2023, author = {Song, Rui and Xu, Runsheng and Festag, Andreas and Ma, Jiaqi and Knoll, Alois}, title = {FedBEVT: Federated Learning Bird's Eye View Perception Transformer in Road Traffic Systems}, volume = {9}, journal = {IEEE Transactions on Intelligent Vehicles}, number = {1}, publisher = {IEEE}, address = {New York}, issn = {2379-8904}, doi = {https://doi.org/10.1109/TIV.2023.3310674}, pages = {958 -- 969}, year = {2023}, abstract = {Bird's eye view (BEV) perception is becoming increasingly important in the field of autonomous driving. It uses multi-view camera data to learn a transformer model that directly projects the perception of the road environment onto the BEV perspective. However, training a transformer model often requires a large amount of data, and as camera data for road traffic are often private, they are typically not shared. Federated learning offers a solution that enables clients to collaborate and train models without exchanging data but model parameters. In this paper, we introduce FedBEVT, a federated transformer learning approach for BEV perception. In order to address two common data heterogeneity issues in FedBEVT: (i) diverse sensor poses, and (ii) varying sensor numbers in perception systems, we propose two approaches - Federated Learning with Camera-Attentive Personalization (FedCaP) and Adaptive Multi-Camera Masking (AMCM), respectively. To evaluate our method in real-world settings, we create a dataset consisting of four typical federated use cases. 
Our findings suggest that FedBEVT outperforms the baseline approaches in all four use cases, demonstrating the potential of our approach for improving BEV perception in autonomous driving.}, language = {en} } @inproceedings{HanXuKefferpuetzetal.2024, author = {Han, Longfei and Xu, Qiuyu and Kefferp{\"u}tz, Klaus and Lu, Ying and Elger, Gordon and Beyerer, J{\"u}rgen}, title = {Scalable Radar-based Roadside Perception: Self-localization and Occupancy Heat Map for Traffic Analysis}, booktitle = {2024 IEEE Intelligent Vehicles Symposium (IV)}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3503-4881-1}, doi = {https://doi.org/10.1109/iv55156.2024.10588397}, pages = {1651 -- 1657}, year = {2024}, language = {en} } @inproceedings{OlcayMeessElger2024, author = {Olcay, Ertug and Meeß, Henri and Elger, Gordon}, title = {Dynamic Obstacle Avoidance for UAVs using MPC and GP-Based Motion Forecast}, booktitle = {2024 European Control Conference (ECC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-3-9071-4410-7}, doi = {https://doi.org/10.23919/ECC64448.2024.10591083}, pages = {1024 -- 1031}, year = {2024}, language = {en} } @inproceedings{KettelgerdesPandeyUnruhetal.2024, author = {Kettelgerdes, Marcel and Pandey, Amit and Unruh, Denis and Erdogan, H{\"u}seyin and Wunderle, Bernhard and Elger, Gordon}, title = {Automotive LiDAR Based Precipitation State Estimation Using Physics Informed Spatio-Temporal 3D Convolutional Neural Networks (PIST-CNN)}, booktitle = {2023 29th International Conference on Mechatronics and Machine Vision in Practice (M2VIP)}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3503-2562-1}, doi = {https://doi.org/10.1109/M2VIP58386.2023.10413394}, year = {2024}, language = {en} } @inproceedings{AgrawalSongKohlietal.2022, author = {Agrawal, Shiva and Song, Rui and Kohli, Akhil and Korb, Andreas and Andre, Maximilian and Holzinger, Erik and Elger, Gordon}, title = {Concept of Smart Infrastructure for Connected Vehicle Assist and Traffic Flow Optimization}, booktitle = {Proceedings of the 8th International Conference on Vehicle Technology and Intelligent Transport Systems}, editor = {Ploeg, Jeroen and Helfert, Markus and Berns, Karsten and Gusikhin, Oleg}, publisher = {SciTePress}, address = {Set{\´u}bal}, isbn = {978-989-758-573-9}, issn = {2184-495X}, doi = {https://doi.org/10.5220/0011068800003191}, pages = {360 -- 367}, year = {2022}, abstract = {The smart infrastructure units can play a vital role to develop smart cities of the future and in assisting automated vehicles on the road by providing extended perception and timely warnings to avoid accidents. This paper focuses on the development of such an infrastructure unit, that is specifically designed for a pedestrian crossing junction. It can control traffic lights at the junction by real-time environment perception through its sensors and can optimize the flow of vehicles and passing vulnerable road users (VRUs). Moreover, it can assist on-road vehicles by providing real-time information and critical warnings via a v2x module. 
This paper further describes different use-cases of the work, all major hardware components involved in the development of the smart infrastructure unit, referred to as an edge, different sensor fusion approaches using the camera, radar, and lidar mounted on the edge for environment perception, various modes of communication including v2x, and the system design.}, language = {en} } @inproceedings{StreckHerschelWallrathetal.2022, author = {Streck, Egor and Herschel, Reinhold and Wallrath, Patrick and Sunderam, M. and Elger, Gordon}, title = {Comparison of Two Different Radar Concepts for Pedestrian Protection on Bus Stops}, booktitle = {Proceedings of the 11th International Conference on Sensor Networks}, editor = {Prasad, Venkatesha and Pesch, Dirk and Ansari, Nirwan and Benavente-Peces, C{\´e}sar}, publisher = {SciTePress}, address = {Set{\´u}bal}, isbn = {978-989-758-551-7}, issn = {2184-4380}, doi = {https://doi.org/10.5220/0010777100003118}, pages = {89 -- 96}, year = {2022}, abstract = {This paper presents the joint work from the "HORIS" project, with a focus on pedestrian detection at bus-stops by radar sensors mounted in the infrastructure to support future autonomous driving and to protect pedestrians in critical situations. Two sensor systems are investigated and evaluated. The first is based on phase-sensitive raw data analysis with a single radar sensor, and the second on sensor data fusion of cluster data from two radar sensors using neural networks to predict the position of pedestrians.}, language = {en} } @unpublished{SongLyuJiangetal.2023, author = {Song, Rui and Lyu, Lingjuan and Jiang, Wei and Festag, Andreas and Knoll, Alois}, title = {V2X-Boosted Federated Learning for Cooperative Intelligent Transportation Systems with Contextual Client Selection}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2305.11654}, year = {2023}, abstract = {Machine learning (ML) has revolutionized transportation systems, enabling autonomous driving and smart traffic services. Federated learning (FL) overcomes privacy constraints by training ML models in distributed systems, exchanging model parameters instead of raw data. However, the dynamic states of connected vehicles affect the network connection quality and influence the FL performance. To tackle this challenge, we propose a contextual client selection pipeline that uses Vehicle-to-Everything (V2X) messages to select clients based on the predicted communication latency. The pipeline includes: (i) fusing V2X messages, (ii) predicting future traffic topology, (iii) pre-clustering clients based on local data distribution similarity, and (iv) selecting clients with minimal latency for future model aggregation.
Experiments show that our pipeline outperforms baselines on various datasets, particularly in non-IID settings.}, language = {en} } @inproceedings{JagtapSongSadashivaiahetal.2025, author = {Jagtap, Abhishek Dinkar and Song, Rui and Sadashivaiah, Sanath Tiptur and Festag, Andreas}, title = {V2X-Gaussians: Gaussian Splatting for Multi-Agent Cooperative Dynamic Scene Reconstruction}, booktitle = {2025 IEEE Intelligent Vehicles Symposium (IV)}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3315-3803-3}, doi = {https://doi.org/10.1109/IV64158.2025.11097436}, pages = {1033 -- 1039}, year = {2025}, language = {en} } @article{BhanderiAgrawalElger2025, author = {Bhanderi, Savankumar and Agrawal, Shiva and Elger, Gordon}, title = {Deep segmentation of 3+1D radar point cloud for real-time roadside traffic user detection}, volume = {15}, pages = {38489}, journal = {Scientific Reports}, publisher = {Springer Nature}, address = {London}, issn = {2045-2322}, doi = {https://doi.org/10.1038/s41598-025-23019-6}, year = {2025}, abstract = {Smart cities rely on intelligent infrastructure to enhance road safety, optimize traffic flow, and enable vehicle-to-infrastructure (V2I) communication. A key component of such infrastructure is an efficient and real-time perception system that accurately detects diverse traffic participants. Among various sensing modalities, automotive radar is one of the best choices due to its robust performance in adverse weather and low-light conditions. However, due to low spatial resolution, traditional clustering-based approaches for radar object detection often struggle with vulnerable road user detection and nearby object separation. Hence, this paper proposes a deep learning-based 3+1D radar point cloud clustering methodology tailored for smart infrastructure-based perception applications. This approach first performs semantic segmentation of the radar point cloud, followed by instance segmentation to generate well-formed clusters with class labels using a deep neural network. It also detects single-point objects that conventional methods often miss. The described approach is developed and evaluated using a smart infrastructure-based sensor setup and it performs segmentation of the point cloud in real-time. Experimental results demonstrate a 95.35\% F1-macro score for semantic segmentation and a 91.03\% mean average precision (mAP) at an intersection over union (IoU) threshold of 0.5 for instance segmentation. Further, the complete pipeline operates at 43.61 frames per second with a memory requirement of less than 0.7 MB on the edge device (Nvidia Jetson AGX Orin).}, language = {en} } @inproceedings{MaksimovskiFestagFacchi2021, author = {Maksimovski, Daniel and Festag, Andreas and Facchi, Christian}, title = {A Survey on Decentralized Cooperative Maneuver Coordination for Connected and Automated Vehicles}, booktitle = {Proceedings of the 7th International Conference on Vehicle Technology and Intelligent Transport Systems}, publisher = {SciTePress}, address = {Set{\´u}bal}, isbn = {978-989-758-513-5}, issn = {2184-495X}, doi = {https://doi.org/10.5220/0010442501000111}, pages = {100 -- 111}, year = {2021}, abstract = {V2X communications can be applied for maneuver coordination of automated vehicles, where the vehicles exchange messages to inform each other of their driving intentions and to negotiate for joint maneuvers.
For motion and maneuver planning of automated vehicles, the cooperative maneuver coordination extends the perception range of the sensors, enhances the planning horizon and allows complex interactions among the vehicles. For specific scenarios, various schemes for maneuver coordination of connected automated vehicles exist. Recently, several proposals for maneuver coordination have been made that address generic instead of specific scenarios and apply different schemes for the message exchange of driving intentions and maneuver negotiation. This paper presents use cases for maneuver coordination and classifies existing generic approaches for decentralized maneuver coordination considering implicit and explicit trajectory broadcast, cost values and space-time reservation. We systematically describe the approaches, compare them and derive future research topics.}, language = {en} } @inproceedings{HegdeStahlLoboetal.2022, author = {Hegde, Anupama and Stahl, Ringo and Lobo, Silas and Festag, Andreas}, title = {Modeling Cellular Network Infrastructure in SUMO}, volume = {2}, booktitle = {SUMO Conference Proceedings}, publisher = {TIB Open Publishing}, address = {Hannover}, issn = {2750-4425}, doi = {https://doi.org/10.52825/scp.v2i.97}, pages = {99 -- 113}, year = {2022}, abstract = {Communication networks are becoming an increasingly important part of the mobility system. They allow traffic participants to be connected and to exchange information related to traffic and roads. The information exchange impacts the behavior of traffic participants, such as the selection of travel routes or their mobility dynamics. Considering infrastructure-based networks, the information exchange depends on the availability of the network infrastructure and the quality of the communication links. Specifically in urban areas, today's 4G and 5G networks deploy small cells of high capacity, which do not provide ubiquitous cellular coverage due to their small range, signal blocking, etc. Therefore, the accurate modeling of the network infrastructure and its integration in simulation scenarios in microscopic traffic simulation software is gaining relevance. Unlike traffic infrastructure, such as traffic lights, the simulation of a cellular network infrastructure is not natively supported in SUMO. Instead, the protocols, functions and entities of the communication system with the physical wireless transmission are modeled in a dedicated and specialized network simulator that is coupled with SUMO. The disadvantage of this approach is that the simulated SUMO entities, typically vehicles, are not aware which portions of the roads are covered by wireless cells and what quality the wireless communication links have. In this paper, we propose a method for modeling the cellular infrastructure in SUMO that introduces a cellular coverage layer to SUMO. This layer models cell sites in a regular hexagonal grid, where each site is served by a base station. Following commonly accepted guidelines for the evaluation of cellular communication system, the method facilitates standardized and realistic modeling of the cellular coverage, including cell sites, antenna characteristics, cell association and handover. In order to ease the applicability of the method, we describe the work flow to create cell sites. As a representative case, we have applied the method to InTAS, the SUMO Ingolstadt traffic scenario and applied real data for the cellular infrastructure. 
We validate the approach by simulating a Cellular V2X system with sidelink connectivity in an urban macro cell environment, coupling SUMO, enhanced by the proposed connectivity sublayer, with ARTERY-C, a network simulator for Cellular V2X. As a proof of concept, we present a signal-to-interference-plus-noise ratio (SINR) coverage map and further evaluate the impact of different types of interference. We also demonstrate the effect of advanced features of cellular networks such as inter-cell interference coordination (ICIC) and sidelink communication modes of Cellular V2X with dynamic switching between the in-coverage and out-of-coverage modes.}, language = {en} } @inproceedings{DeloozFestagVinel2020, author = {Delooz, Quentin and Festag, Andreas and Vinel, Alexey}, title = {Revisiting Message Generation Strategies for Collective Perception in Connected and Automated Driving}, booktitle = {VEHICULAR 2020: The Ninth International Conference on Advances in Vehicular Systems, Technologies and Applications}, publisher = {IARIA}, address = {[s. l.]}, isbn = {978-1-61208-795-5}, url = {https://www.thinkmind.org/index.php?view=article\&articleid=vehicular_2020_1_80_30039}, pages = {46 -- 52}, year = {2020}, language = {en} } @unpublished{WanZhaoWiedholzetal.2025, author = {Wan, Lei and Zhao, Jianxin and Wiedholz, Andreas and Bied, Manuel and Martinez de Lucena, Mateus and Jagtap, Abhishek Dinkar and Festag, Andreas and Fr{\"o}hlich, Ant{\^o}nio and Keen, Hannan Ejaz and Vinel, Alexey}, title = {A Systematic Literature Review on Vehicular Collaborative Perception - A Computer Vision Perspective}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2504.04631}, year = {2025}, language = {en} } @article{PandeyKuehnWeisetal.2025, author = {Pandey, Amit and K{\"u}hn, Stephan and Weis, Alexander and Wunderle, Bernhard and Elger, Gordon}, title = {Evaluating optical performance degradation of automotive cameras under accelerated aging}, volume = {2026}, pages = {109396}, journal = {Optics and Lasers in Engineering}, number = {196}, publisher = {Elsevier}, address = {Amsterdam}, issn = {1873-0302}, doi = {https://doi.org/10.1016/j.optlaseng.2025.109396}, year = {2025}, abstract = {Automotive cameras are subject to environmental stress, which degrades performance by reducing image sharpness. To qualify for automotive use and to ensure that the cameras maintain sharpness according to the hard requirements of end-of-line testing, cameras have to undergo standardized accelerated aging tests. These tests are performed to demonstrate reliability and functional safety over the lifetime. Few studies have been published that demonstrate how aging contributes to the degradation of optical performance. This study addresses this gap by combining accelerated thermal aging with sharpness tracking to investigate degradation over time. To quantify sharpness degradation, six series-production cameras were subjected to accelerated thermal aging between -40 °C and +85 °C. Each camera underwent 2000 aging cycles, equivalent to 80\% of their lifetime based on the Coffin-Manson model of the LV124 standard. Sharpness was measured by calculating the Spatial Frequency Response (SFR) from images captured of a double-cross reticle projected by a virtual object generator with three illumination wavelengths (625 nm, 520 nm, and 470 nm). The change in sharpness was evaluated with SFR50 and SFR at 60 line pairs per millimeter (SFR@60).
During the first 250 cycles, a wear-in effect was observed, where sharpness increased before leveling off, as seen previously. The results also showed only a slow subsequent decline in sharpness, indicating long-term stability. Analysis indicated that before aging, the best focal plane was located closer to the focal position of the red wavelength, which lies furthest from the objective. By the end of the aging process, the best focal plane had shifted toward the focal position of the blue wavelength, which is located closer to the objective. This suggests a forward movement of the image sensor due to aging. Even after 2000 cycles, all cameras maintained an SFR@60 above 0.5. A Random Forest regression model was trained to predict the age based on the SFR curves, achieving a mean absolute error of 126 cycles and an R² score of 0.96.}, language = {en} } @article{SchmidZippeliusHanssetal.2023, author = {Schmid, Maximilian and Zippelius, Andreas and Hanß, Alexander and B{\"o}ckhorst, Stephan and Elger, Gordon}, title = {Investigations on High-Power LEDs and Solder Interconnects in Automotive Application: Part II - Reliability}, volume = {23}, journal = {IEEE Transactions on Device and Materials Reliability}, number = {3}, publisher = {IEEE}, address = {New York}, issn = {1558-2574}, doi = {https://doi.org/10.1109/TDMR.2023.3300355}, pages = {419 -- 429}, year = {2023}, language = {en} } @article{MeessGernerHeinetal.2024, author = {Meess, Henri and Gerner, Jeremias and Hein, Daniel and Schmidtner, Stefanie and Elger, Gordon and Bogenberger, Klaus}, title = {First steps towards real-world traffic signal control optimisation by reinforcement learning}, volume = {18}, journal = {Journal of Simulation}, number = {6}, publisher = {Taylor \& Francis}, address = {London}, issn = {1747-7778}, doi = {https://doi.org/10.1080/17477778.2024.2364715}, pages = {957 -- 972}, year = {2024}, abstract = {Enhancing traffic signal optimisation has the potential to improve urban traffic flow without the need for expensive infrastructure modifications. While reinforcement learning (RL) techniques have demonstrated their effectiveness in simulations, their real-world implementation is still a challenge. Real-world systems need to be developed that guarantee a deployable action definition for real traffic systems while prioritising safety constraints and robust policies. This paper introduces a method to overcome this challenge by introducing a novel action definition that optimises parameter-level control programmes designed by traffic engineers. The complete proposed framework consists of a traffic situation estimator, a feature extractor, and a system that enables training on estimates of real-world traffic situations. Furthermore, multimodal optimisation, scalability, and continuous training after deployment could be achieved.
The first simulation-based tests using this action definition show an average improvement of more than 20\% in traffic flow compared to the baseline, the corresponding pre-optimised real-world control.}, language = {en} } @unpublished{KettelgerdesHillmannHirmeretal.2023, author = {Kettelgerdes, Marcel and Hillmann, Tjorven and Hirmer, Thomas and Erdogan, H{\"u}seyin and Wunderle, Bernhard and Elger, Gordon}, title = {Accelerated Real-Life (ARL) Testing and Characterization of Automotive LiDAR Sensors to facilitate the Development and Validation of Enhanced Sensor Models}, publisher = {arXiv}, address = {Ithaca}, doi = {https://doi.org/10.48550/arXiv.2312.04229}, year = {2023}, abstract = {In the realm of automated driving simulation and sensor modeling, the need for highly accurate sensor models is paramount for ensuring the reliability and safety of advanced driver assistance systems (ADAS). Hence, numerous works focus on the development of high-fidelity models of ADAS sensors, such as camera, radar and modern LiDAR systems, to simulate the sensor behavior in different driving scenarios, even under varying environmental conditions, considering, for example, adverse weather effects. However, aging effects of sensors, leading to suboptimal system performance, are mostly overlooked by current simulation techniques. This paper introduces a cutting-edge Hardware-in-the-Loop (HiL) test bench designed for the automated, accelerated aging and characterization of automotive LiDAR sensors. The primary objective of this research is to address the aging effects of LiDAR sensors over the product life cycle, specifically focusing on aspects such as laser beam profile deterioration, output power reduction and intrinsic parameter drift, which are mostly neglected in current sensor models. In this way, this ongoing research is intended to pave the way not only towards identifying and modeling the respective degradation effects, but also towards suggesting quantitative model validation metrics.}, language = {en} } @inproceedings{SchmidMombergKettelgerdesetal.2023, author = {Schmid, Maximilian and Momberg, Marcel and Kettelgerdes, Marcel and Elger, Gordon}, title = {Transient thermal analysis for VCSEL Diodes}, booktitle = {2023 29th International Workshop on Thermal Investigations of ICs and Systems (THERMINIC)}, publisher = {IEEE}, address = {Piscataway}, isbn = {979-8-3503-1862-3}, doi = {https://doi.org/10.1109/THERMINIC60375.2023.10325906}, year = {2023}, language = {en} } @inproceedings{SchwanSchmidElger2022, author = {Schwan, Hannes and Schmid, Maximilian and Elger, Gordon}, title = {Laser Stimulated Transient Thermal Analysis of Semiconductors}, booktitle = {THERMINIC 2022: Proceedings 2022}, publisher = {IEEE}, address = {Piscataway}, isbn = {978-1-6654-9229-4}, doi = {https://doi.org/10.1109/THERMINIC57263.2022.9950672}, year = {2022}, language = {en} }