@thesis{Schieber2021,
  author    = {Schieber, Hannah},
  title     = {Camera and LiDAR based Deep Feature Fusion for 3D Semantic Segmentation},
  publisher = {Technische Hochschule Ingolstadt},
  address   = {Ingolstadt},
  url       = {http://nbn-resolving.de/urn:nbn:de:bvb:573-33038},
  pages     = {IX, 92},
  year      = {2021},
  abstract  = {For the environmental perception of autonomous vehicles, camera and LiDAR sensors are widely used. Applying computer vision algorithms to the data captured by these sensors is key to understanding and interpreting the 3D environment. Therefore, 3D semantic segmentation algorithms are common. This master thesis investigates deep fusion of camera and LiDAR features in neural networks to improve 3D semantic segmentation of traffic scenes. The presented approach utilizes the so-called range view of LiDAR scans to generate 2D images. This 2D projection can be processed by CNNs, which have already been used successfully in the camera domain. Building upon a state-of-the-art approach from the camera domain, a CNN is adapted and optimized to enable range view semantic segmentation. Based on CNNs from the camera domain and the adapted range view CNN, a deep fusion architecture is built. For the deep fusion, different fusion strategies and fusion locations inside the architecture are considered. The deep fusion fuses camera and LiDAR features multiple times; two fusion locations have been investigated, resulting in two deep fusion approaches. Both approaches extract feature maps of the individual modalities at different stages of the backbones. In the first fusion approach, the feature maps are fused and then processed by several modules to leverage semantically meaningful information. In the second fusion approach, the fused feature maps influence each other in a top-down manner from high to low resolutions and in a bottom-up manner from low to high resolutions before being processed by the same modules as in the first approach. Optionally, a feature refinement in the manner of a late fusion step is presented, which refines the features at the last scale before the final semantic segmentation. The deep fusion architecture is evaluated on two challenging outdoor datasets. It outperforms the early and late fusion baselines built upon the same architectures as the presented deep fusion. It also outperforms all range-image-based LiDAR approaches and deep fusion approaches on the used datasets.},
  language  = {en}
}

@inproceedings{SchieberDuerrSchoenetal.2022a,
  author    = {Schieber, Hannah and Duerr, Fabian and Sch{\"o}n, Torsten and Beyerer, J{\"u}rgen},
  title     = {Deep Sensor Fusion with Pyramid Fusion Networks for 3D Semantic Segmentation},
  booktitle = {2022 IEEE Intelligent Vehicles Symposium (IV)},
  publisher = {IEEE},
  address   = {Piscataway},
  isbn      = {978-1-6654-8821-1},
  doi       = {10.1109/IV51971.2022.9827113},
  pages     = {375--381},
  year      = {2022},
  language  = {en}
}

@unpublished{SchieberDuerrSchoenetal.2022b,
  author    = {Schieber, Hannah and Duerr, Fabian and Sch{\"o}n, Torsten and Beyerer, J{\"u}rgen},
  title     = {Deep Sensor Fusion with Pyramid Fusion Networks for 3D Semantic Segmentation},
  publisher = {arXiv},
  address   = {Ithaca},
  doi       = {10.48550/arXiv.2205.13629},
  year      = {2022},
  language  = {en}
}