@inproceedings{FadlSchoenBehretetal.2025,
  author = {Fadl, Islam and Sch{\"o}n, Torsten and Behret, Valentino and Brandmeier, Thomas and Palme, Frank and Helmer, Thomas},
  title = {Environment Setup and Model Benchmark of the MuFoRa Dataset},
  booktitle = {Proceedings of the 20th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - (Volume 3)},
  editor = {Bashford-Rogers, Thomas and Meneveaux, Daniel and Ammi, Mehdi and Ziat, Mounia and J{\"a}nicke, Stefan and Purchase, Helen and Radeva, Petia and Furnari, Antonino and Bouatouch, Kadi and Sousa, A. Augusto},
  publisher = {SciTePress},
  address = {Set{\'u}bal},
  isbn = {978-989-758-728-3},
  doi = {https://doi.org/10.5220/0013307900003912},
  pages = {729--737},
  year = {2025},
  abstract = {Adverse meteorological conditions, particularly fog and rain, present significant challenges to computer vision algorithms and autonomous systems. This work presents MuFoRa, a novel, controllable, and measured multimodal dataset recorded at CARISSMA's indoor test facility, specifically designed to assess perceptual difficulties in foggy and rainy environments. The dataset bridges a research gap in public benchmarking datasets, where quantifiable weather parameters are lacking. The proposed dataset comprises synchronized data from two sensor modalities: RGB stereo cameras and LiDAR sensors, captured under varying intensities of fog and rain. The dataset incorporates synchronized meteorological annotations, such as visibility through fog and precipitation levels of rain, and the methods section contributes a detailed explanation of the diverse weather effects observed during data collection. The dataset's utility is demonstrated through a baseline evaluation example, assessing the performance degradation of state-of-the-art YOLO11 and DETR 2D object detection algorithms under controlled and quantifiable adverse weather conditions. The public release of the dataset (https://doi.org/10.5281/zenodo.14175611) facilitates benchmarking and quantitative assessment of advanced multimodal computer vision and deep learning models under the challenging conditions of fog and rain.},
  language = {en}
}

@inproceedings{BehretKushtanovaFadletal.2025,
  author = {Behret, Valentino and Kushtanova, Regina and Fadl, Islam and Weber, Simon and Helmer, Thomas and Palme, Frank},
  title = {Sensor Calibration and Data Analysis of the MuFoRa Dataset},
  booktitle = {Proceedings of the 20th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications - (Volume 2)},
  editor = {Bashford-Rogers, Thomas and Meneveaux, Daniel and Ammi, Mehdi and Ziat, Mounia and J{\"a}nicke, Stefan and Purchase, Helen and Radeva, Petia and Furnari, Antonino and Bouatouch, Kadi and Sousa, A. Augusto},
  publisher = {SciTePress},
  address = {Set{\'u}bal},
  isbn = {978-989-758-728-3},
  doi = {https://doi.org/10.5220/0013310400003912},
  pages = {622--631},
  year = {2025},
  abstract = {Autonomous driving sensors face significant challenges under adverse weather conditions such as fog and rain, which can seriously degrade their performance and reliability. Existing datasets often lack the reproducible and measurable data needed to adequately quantify these effects. To address this gap, a new multimodal dataset (MuFoRa) has been collected under controlled adverse weather conditions at the CARISSMA facility, using a stereo camera and two solid-state LiDAR sensors. This dataset is used to quantitatively assess sensor degradation by measuring the entropy of images and the number of point-cloud inliers on a spherical target. These metrics are used to evaluate the impact on performance under varying conditions of fog (5 to 150 m visibility) and rain (20 to 100 mm/h intensity) at different distances (5 to 50 m). Additionally, two calibration-target detection approaches - deep-learning-based and Hough-based - are evaluated to achieve accurate sensor alignment. The contributions include the introduction of a new dataset focused on fog and rain, the evaluation of sensor degradation, and an improved calibration approach. This dataset is intended to support the development of more robust sensor fusion and object detection algorithms for autonomous driving.},
  language = {en}
}

@article{BauderKubjatkoHelmeretal.2022,
  author = {Bauder, Maximilian and Kubjatko, Tibor and Helmer, Thomas and Schweiger, Hans-Georg},
  title = {Does Vehicle-2-X Radio Transmission Technology Need to Be Considered within Accident Analysis in the Future?},
  journal = {Sensors},
  volume = {22},
  number = {24},
  pages = {9832},
  publisher = {MDPI},
  address = {Basel},
  issn = {1424-8220},
  doi = {https://doi.org/10.3390/s22249832},
  year = {2022},
  abstract = {In this analysis, scenarios relevant to Cooperative Intelligent Transportation Systems are created to investigate whether Vehicle-to-X transmission technologies need to be differentiated for the purposes of accident analysis. For each scenario, the distances between the vehicles are calculated 5 s before the crash. Studies on the difference between Dedicated Short-Range Communication (IEEE 802.11p) and Cellular Vehicle-to-X communication (LTE-V2X PC5 Mode 4) are then used to assess whether both technologies have a reliable connection over the relevant distance. If this is the case, the transmission technology is of secondary importance for future investigations of Vehicle-to-X communication in combination with accident analysis. The results show that studies on freeways and rural roads can be carried out independently of the transmission technology and other boundary conditions (speed, traffic density, non-line of sight/line of sight). The situation is different for studies in urban areas, where, depending on the traffic density, both technologies may not have a sufficiently reliable connection range.},
  language = {en}
}