@article{VergnanoPelizzariGiorgiannietal.2025,
  author    = {Vergnano, Alberto and Pelizzari, Alessandro and Giorgianni, Claudio and Kovanda, Jan and Zimmer, Alessandro and Lopes da Silva, Joed and Rezvanpour, Hamed and Leali, Francesco},
  title     = {Monitoring Occupant Posture Using a Standardized Sensor Interface with a Vehicle Seat},
  journal   = {Designs},
  volume    = {9},
  number    = {2},
  pages     = {52},
  publisher = {MDPI},
  address   = {Basel},
  issn      = {2411-9660},
  doi       = {https://doi.org/10.3390/designs9020052},
  year      = {2025},
  abstract  = {Car safety can be enhanced by enabling the Airbag Control Unit (ACU) to adaptively deploy different charges based on the occupant's position once the crash occurs. In this context, monitoring the occupant's position using a sensorized seat integrated with an Inertial Measurement Unit (IMU) offers a practical and cost-effective solution. However, certain challenges still need to be addressed. The adoption of sensorized seats in research and vehicle set-up is still under consideration. This study investigates an interface device that can be reconfigured to suit almost any seat model. This reconfigurability makes it easily adaptable to new vehicles under development and applicable to any passenger seat in the vehicle. This paper details the device's design, including its programming using calibration and monitoring features, which significantly improves its reliability compared to earlier prototypes. Extensive testing through real driving experiments with multiple participants demonstrated an accuracy range of 45-100\%. The testing involved both drivers and passengers, showcasing the device's ability to effectively monitor various in-car scenarios.},
  language  = {en}
}

@inproceedings{VriesmanBrittoJuniorZimmeretal.2020,
  author    = {Vriesman, Daniel and Britto Junior, Alceu and Zimmer, Alessandro and Koerich, Alessandro Lameiras},
  title     = {Texture CNN for thermoelectric metal pipe image classification},
  booktitle = {2019 IEEE 31st International Conference on Tools with Artificial Intelligence (ICTAI)},
  publisher = {IEEE},
  address   = {Piscataway},
  isbn      = {978-1-7281-3798-8},
  doi       = {https://doi.org/10.1109/ICTAI.2019.00085},
  pages     = {569--574},
  year      = {2020},
  language  = {en}
}

@article{VriesmanBrittoJuniorZimmeretal.2019,
  author    = {Vriesman, Daniel and Britto Junior, Alceu and Zimmer, Alessandro and Koerich, Alessandro Lameiras and Paludo, Rodrigo},
  title     = {Automatic visual inspection of thermoelectric metal pipes},
  journal   = {Signal, Image and Video Processing},
  volume    = {13},
  number    = {5},
  pages     = {975--983},
  publisher = {Springer},
  address   = {London},
  issn      = {1863-1711},
  doi       = {https://doi.org/10.1007/s11760-019-01435-2},
  year      = {2019},
  language  = {en}
}

@unpublished{PederivaDeMartinoZimmer2023,
  author    = {Pederiva, Marcelo Eduardo and De Martino, Jos{\'e} Mario and Zimmer, Alessandro},
  title     = {MonoNext: A 3D Monocular Object Detection with ConvNext},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {https://doi.org/10.48550/arXiv.2308.00596},
  year      = {2023},
  abstract  = {Autonomous driving perception tasks rely heavily on cameras as the primary sensor for Object Detection, Semantic Segmentation, Instance Segmentation, and Object Tracking. However, RGB images captured by cameras lack depth information, which poses a significant challenge in 3D detection tasks. To supplement this missing data, mapping sensors such as LIDAR and RADAR are used for accurate 3D Object Detection. Despite their significant accuracy, the multi-sensor models are expensive and require a high computational demand. In contrast, Monocular 3D Object Detection models are becoming increasingly popular, offering a faster, cheaper, and easier-to-implement solution for 3D detections. This paper introduces a different Multi-Tasking Learning approach called MonoNext that utilizes a spatial grid to map objects in the scene. MonoNext employs a straightforward approach based on the ConvNext network and requires only 3D bounding box annotated data. In our experiments with the KITTI dataset, MonoNext achieved high precision and competitive performance comparable with state-of-the-art approaches. Furthermore, by adding more training data, MonoNext surpassed itself and achieved higher accuracies.},
  language  = {en}
}

@inproceedings{BoehmlaenderHasirliogluYanoetal.2015,
  author    = {B{\"o}hml{\"a}nder, Dennis and Hasirlioglu, Sinan and Yano, Vitor and Lauerer, Christian and Brandmeier, Thomas and Zimmer, Alessandro},
  title     = {Advantages in Crash Severity Prediction Using Vehicle to Vehicle Communication},
  booktitle = {Proceedings: 2015 45th Annual IEEE/IFIP International Conference on Dependable Systems and Networks Workshops},
  publisher = {IEEE},
  address   = {Los Alamitos},
  isbn      = {978-1-4673-8044-7},
  doi       = {https://doi.org/10.1109/DSN-W.2015.23},
  pages     = {112--117},
  year      = {2015},
  language  = {en}
}

@inproceedings{VriesmanJuniorZimmeretal.2023,
  author    = {Vriesman, Daniel and Britto Junior, Alceu and Zimmer, Alessandro and Brandmeier, Thomas},
  title     = {Multimodal Early Fusion of Automotive Sensors based on Autoencoder Network: An anchor-free approach for Vehicle 3D Detection},
  booktitle = {2023 26th International Conference on Information Fusion (FUSION)},
  publisher = {IEEE},
  address   = {Piscataway},
  isbn      = {979-8-89034-485-4},
  doi       = {https://doi.org/10.23919/FUSION52260.2023.10224140},
  year      = {2023},
  language  = {en}
}

@inproceedings{PederivaDeMartinoZimmer2025,
  author    = {Pederiva, Marcelo Eduardo and De Martino, Jos{\'e} Mario and Zimmer, Alessandro},
  title     = {A light perspective for 3D object detection},
  booktitle = {Seventeenth International Conference on Machine Vision (ICMV 2024)},
  editor    = {Osten, Wolfgang},
  publisher = {SPIE},
  address   = {Bellingham},
  isbn      = {978-1-5106-8827-8},
  doi       = {https://doi.org/10.1117/12.3055035},
  year      = {2025},
  language  = {en}
}

@article{TabataZimmerCoelhoetal.2023,
  author    = {Tabata, Alan Naoto and Zimmer, Alessandro and Coelho, Leandro Dos Santos and Mariani, Viviana Cocco},
  title     = {Analyzing CARLA's performance for 2D object detection and monocular depth estimation based on deep learning approaches},
  journal   = {Expert Systems with Applications},
  volume    = {227},
  pages     = {120200},
  publisher = {Elsevier},
  address   = {Amsterdam},
  issn      = {0957-4174},
  doi       = {https://doi.org/10.1016/j.eswa.2023.120200},
  year      = {2023},
  language  = {en}
}

@inproceedings{PalandurkarChanSilvaetal.2023,
  author    = {Palandurkar, Tanaya Viraj and Chan, Lap Yan and Lopes da Silva, Joed and Zimmer, Alessandro and Schwarz, Ulrich},
  title     = {Driver's Chest Position Detection using FMCW Radar Data Collected in a Vehicle Mock-up and CNN},
  booktitle = {2023 24th International Radar Symposium (IRS)},
  publisher = {IEEE},
  address   = {Piscataway},
  isbn      = {978-3-944976-34-1},
  doi       = {https://doi.org/10.23919/IRS57608.2023.10172421},
  year      = {2023},
  language  = {en}
}

@inproceedings{ChanZimmerLopesdaSilvaetal.2020,
  author    = {Chan, Lap Yan and Zimmer, Alessandro and Lopes da Silva, Joed and Brandmeier, Thomas},
  title     = {European Union dataset and annotation tool for real time automatic license plate detection and blurring},
  booktitle = {2020 IEEE 23rd International Conference on Intelligent Transportation Systems (ITSC)},
  publisher = {IEEE},
  address   = {Piscataway},
  isbn      = {978-1-7281-4149-7},
  doi       = {https://doi.org/10.1109/ITSC45102.2020.9294240},
  year      = {2020},
  language  = {en}
}