<?xml version="1.0" encoding="utf-8"?>
<export-example>
  <doc>
    <id>5942</id>
    <completedYear/>
    <publishedYear>2025</publishedYear>
    <thesisYearAccepted/>
    <language>eng</language>
    <pageFirst/>
    <pageLast/>
    <pageNumber>26</pageNumber>
    <edition/>
    <issue>11</issue>
    <volume>25</volume>
    <articleNumber>3422</articleNumber>
    <type>article</type>
    <publisherName>MDPI</publisherName>
    <publisherPlace>Basel</publisherPlace>
    <creatingCorporation/>
    <contributingCorporation/>
    <belongsToBibliography>1</belongsToBibliography>
    <completedDate>2025-06-02</completedDate>
    <publishedDate/>
    <thesisDateAccepted/>
    <title language="eng">Infra-3DRC-FusionNet: Deep Fusion of Roadside Mounted RGB Mono Camera and Three-Dimensional Automotive Radar for Traffic User Detection</title>
    <abstract language="eng">Mono RGB cameras and automotive radar sensors provide a complementary information set that makes them excellent candidates for sensor data fusion to obtain robust traffic user detection. This has been widely used in the vehicle domain and recently introduced in roadside-mounted smart infrastructure-based road user detection. However, the performance of the most commonly used late fusion methods often degrades when the camera fails to detect road users in adverse environmental conditions. The solution is to fuse the data using deep neural networks at the early stage of the fusion pipeline to use the complete data provided by both sensors. Research has been carried out in this area, but is limited to vehicle-based sensor setups. Hence, this work proposes a novel deep neural network to jointly fuse RGB mono-camera images and 3D automotive radar point cloud data to obtain enhanced traffic user detection for the roadside-mounted smart infrastructure setup. Projected radar points are first used to generate anchors in image regions with a high likelihood of road users, including areas not visible to the camera. These anchors guide the prediction of 2D bounding boxes, object categories, and confidence scores. Valid detections are then used to segment radar points by instance, and the results are post-processed to produce final road user detections in the ground plane. The trained model is evaluated for different light and weather conditions using ground truth data from a lidar sensor. It provides a precision of 92%, recall of 78%, and F1-score of 85%. The proposed deep fusion methodology has 33%, 6%, and 21% absolute improvement in precision, recall, and F1-score, respectively, compared to object-level spatial fusion output.</abstract>
    <parentTitle language="eng">Sensors</parentTitle>
    <identifier type="issn">1424-8220</identifier>
    <identifier type="urn">urn:nbn:de:bvb:573-59421</identifier>
    <enrichment key="opus_doi_flag">true</enrichment>
    <enrichment key="local_crossrefDocumentType">journal-article</enrichment>
    <enrichment key="local_crossrefLicence">https://creativecommons.org/licenses/by/4.0/</enrichment>
    <enrichment key="local_import_origin">crossref</enrichment>
    <enrichment key="local_doiImportPopulated">PersonAuthorFirstName_1,PersonAuthorLastName_1,PersonAuthorIdentifierOrcid_1,PersonAuthorFirstName_2,PersonAuthorLastName_2,PersonAuthorIdentifierOrcid_2,PersonAuthorFirstName_3,PersonAuthorLastName_3,PersonAuthorIdentifierOrcid_3,PublisherName,TitleMain_1,Language,TitleAbstract_1,TitleParent_1,ArticleNumber,Issue,Volume,CompletedYear,IdentifierIssn,Enrichmentlocal_crossrefLicence</enrichment>
    <enrichment key="opus.source">doi-import</enrichment>
    <enrichment key="THI_relatedIdentifier">https://doi.org/10.3390/s25113422</enrichment>
    <enrichment key="THI_openaccess">ja</enrichment>
    <enrichment key="THI_review">peer-review</enrichment>
    <enrichment key="THI_articleversion">published</enrichment>
    <enrichment key="opus.doi.autoCreate">false</enrichment>
    <enrichment key="opus.urn.autoCreate">true</enrichment>
    <licence>Creative Commons BY 4.0</licence>
    <author>
      <first_name>Shiva</first_name>
      <last_name>Agrawal</last_name>
    </author>
    <author>
      <first_name>Savankumar</first_name>
      <last_name>Bhanderi</last_name>
    </author>
    <author>
      <first_name>Gordon</first_name>
      <last_name>Elger</last_name>
    </author>
    <collection role="open_access" number="">open_access</collection>
    <collection role="institutes" number="19311">Fakultät Elektro- und Informationstechnik</collection>
    <collection role="institutes" number="19320">Institut für Innovative Mobilität (IIMo)</collection>
    <collection role="persons" number="26589">Elger, Gordon</collection>
    <collection role="institutes" number="19569">Fraunhofer-Anwendungszentrum "Vernetzte Mobilität und Infrastruktur"</collection>
    <thesisPublisher>Technische Hochschule Ingolstadt</thesisPublisher>
    <file>https://opus4.kobv.de/opus4-haw/files/5942/sensors-25-03422.pdf</file>
  </doc>
  <doc>
    <id>6345</id>
    <completedYear/>
    <publishedYear>2025</publishedYear>
    <thesisYearAccepted/>
    <language>eng</language>
    <pageFirst/>
    <pageLast/>
    <pageNumber>17</pageNumber>
    <edition/>
    <issue/>
    <volume>15</volume>
    <articleNumber>38489</articleNumber>
    <type>article</type>
    <publisherName>Springer Nature</publisherName>
    <publisherPlace>London</publisherPlace>
    <creatingCorporation/>
    <contributingCorporation/>
    <belongsToBibliography>1</belongsToBibliography>
    <completedDate>2025-11-10</completedDate>
    <publishedDate/>
    <thesisDateAccepted/>
    <title language="eng">Deep segmentation of 3+1D radar point cloud for real-time roadside traffic user detection</title>
    <abstract language="eng">Smart cities rely on intelligent infrastructure to enhance road safety, optimize traffic flow, and enable vehicle-to-infrastructure (V2I) communication. A key component of such infrastructure is an efficient and real-time perception system that accurately detects diverse traffic participants. Among various sensing modalities, automotive radar is one of the best choices due to its robust performance in adverse weather and low-light conditions. However, due to low spatial resolution, traditional clustering-based approaches for radar object detection often struggle with vulnerable road user detection and nearby object separation. Hence, this paper proposes a deep learning-based D radar point cloud clustering methodology tailored for smart infrastructure-based perception applications. This approach first performs semantic segmentation of the radar point cloud, followed by instance segmentation to generate well-formed clusters with class labels using a deep neural network. It also detects single-point objects that conventional methods often miss. The described approach is developed and experimented using a smart infrastructure-based sensor setup and it performs segmentation of the point cloud in real-time. Experimental results demonstrate 95.35% F1-macro score for semantic segmentation and 91.03% mean average precision (mAP) at an intersection over union (IoU) threshold of 0.5 for instance segmentation. Further, the complete pipeline operates at 43.61 frames per second with a memory requirement of less than 0.7 MB on the edge device (Nvidia Jetson AGX Orin).</abstract>
    <parentTitle language="eng">Scientific Reports</parentTitle>
    <identifier type="issn">2045-2322</identifier>
    <identifier type="urn">urn:nbn:de:bvb:573-63458</identifier>
    <enrichment key="opus.import.date">2025-11-06T01:21:21+00:00</enrichment>
    <enrichment key="opus.source">sword</enrichment>
    <enrichment key="opus.import.user">deepgreen</enrichment>
    <enrichment key="opus.import.file">attachment; filename=deposit.zip</enrichment>
    <enrichment key="opus.import.checksum">bd9298f4e4bdc7bb0c5d512fa98ea449</enrichment>
    <enrichment key="THI_relatedIdentifier">https://doi.org/10.1038/s41598-025-23019-6</enrichment>
    <enrichment key="THI_articleversion">published</enrichment>
    <enrichment key="THI_openaccess">ja</enrichment>
    <enrichment key="THI_review">peer-review</enrichment>
    <enrichment key="THI_DownloadUrl">https://github.com/bhanderisavan/roadside-radar-seg</enrichment>
    <enrichment key="THI_furtherversion">https://opus4.kobv.de/opus4-haw/frontdoor/index/index/docId/6199</enrichment>
    <enrichment key="opus.doi.autoCreate">false</enrichment>
    <enrichment key="opus.urn.autoCreate">true</enrichment>
    <licence>Creative Commons BY 4.0</licence>
    <author>
      <first_name>Savankumar</first_name>
      <last_name>Bhanderi</last_name>
    </author>
    <author>
      <first_name>Shiva</first_name>
      <last_name>Agrawal</last_name>
    </author>
    <author>
      <first_name>Gordon</first_name>
      <last_name>Elger</last_name>
    </author>
    <collection role="open_access" number="">open_access</collection>
    <collection role="institutes" number="19311">Fakultät Elektro- und Informationstechnik</collection>
    <collection role="institutes" number="19320">Institut für Innovative Mobilität (IIMo)</collection>
    <collection role="persons" number="26589">Elger, Gordon</collection>
    <collection role="institutes" number="19569">Fraunhofer-Anwendungszentrum "Vernetzte Mobilität und Infrastruktur"</collection>
    <collection role="Import" number="deepgreen">DeepGreen</collection>
    <thesisPublisher>Technische Hochschule Ingolstadt</thesisPublisher>
    <file>https://opus4.kobv.de/opus4-haw/files/6345/41598_2025_Article_23019.pdf</file>
  </doc>
  <doc>
    <id>6199</id>
    <completedYear/>
    <publishedYear>2025</publishedYear>
    <thesisYearAccepted/>
    <language>eng</language>
    <pageFirst/>
    <pageLast/>
    <pageNumber>18</pageNumber>
    <edition/>
    <issue/>
    <volume/>
    <articleNumber/>
    <type>preprint</type>
    <publisherName>Research Square</publisherName>
    <publisherPlace>Durham</publisherPlace>
    <creatingCorporation/>
    <contributingCorporation/>
    <belongsToBibliography>0</belongsToBibliography>
    <completedDate>2025-09-09</completedDate>
    <publishedDate/>
    <thesisDateAccepted/>
    <title language="eng">Deep Segmentation of 3+1D Radar Point Cloud for Real-Time Roadside Traffic User Detection</title>
    <abstract language="eng">Smart cities rely on intelligent infrastructure to enhance road safety, optimize traffic flow, and enable vehicle-to-infrastructure (V2I) communication. A key component of such infrastructure is an efficient and real-time perception system that accurately detects diverse traffic participants. Among various sensing modalities, automotive radar is one of the best choices due to its robust performance in adverse weather and low-light conditions. However, due to low spatial resolution, traditional clustering-based approaches for radar object detection often struggle with vulnerable road user detection and nearby object separation. Hence, this paper proposes a deep learning-based 3+1D radar point cloud clustering methodology tailored for smart infrastructure-based perception applications. This approach first performs semantic segmentation of the radar point cloud, followed by instance segmentation to generate well-formed clusters with class labels using a deep neural network. It also detects single-point objects that conventional methods often miss. The described approach is developed and experimented using a smart infrastructure-based sensor setup and it performs segmentation of the point cloud in real-time. Experimental results demonstrate 95.35% F1-macro score for semantic segmentation and 91.03% mean average precision (mAP) at an intersection over union (IoU) threshold of 0.5 for instance segmentation. Further, the complete pipeline operates at 43.61 frames per second with a memory requirement of less than 0.7 MB on the edge device (Nvidia Jetson AGX Orin).</abstract>
    <parentTitle language="eng">Research Square</parentTitle>
    <identifier type="issn">2693-5015</identifier>
    <identifier type="urn">urn:nbn:de:bvb:573-61996</identifier>
    <enrichment key="opus_doi_flag">true</enrichment>
    <enrichment key="local_crossrefDocumentType">posted-content/preprint</enrichment>
    <enrichment key="local_crossrefLicence">https://creativecommons.org/licenses/by/4.0/</enrichment>
    <enrichment key="local_import_origin">crossref</enrichment>
    <enrichment key="local_doiImportPopulated">PersonAuthorFirstName_1,PersonAuthorLastName_1,PersonAuthorFirstName_2,PersonAuthorLastName_2,PersonAuthorFirstName_3,PersonAuthorLastName_3,PublisherName,TitleMain_1,TitleAbstract_1,CompletedYear,Enrichmentlocal_crossrefLicence</enrichment>
    <enrichment key="opus.source">doi-import</enrichment>
    <enrichment key="THI_relatedIdentifier">https://doi.org/10.21203/rs.3.rs-7222130/v1</enrichment>
    <enrichment key="THI_openaccess">ja</enrichment>
    <enrichment key="THI_review">nein</enrichment>
    <enrichment key="THI_furtherversion">https://opus4.kobv.de/opus4-haw/frontdoor/index/index/docId/6345</enrichment>
    <licence>Creative Commons BY 4.0</licence>
    <author>
      <first_name>Savankumar</first_name>
      <last_name>Bhanderi</last_name>
    </author>
    <author>
      <first_name>Shiva</first_name>
      <last_name>Agrawal</last_name>
    </author>
    <author>
      <first_name>Gordon</first_name>
      <last_name>Elger</last_name>
    </author>
    <collection role="open_access" number="">open_access</collection>
    <collection role="institutes" number="19311">Fakultät Elektro- und Informationstechnik</collection>
    <collection role="institutes" number="19320">Institut für Innovative Mobilität (IIMo)</collection>
    <collection role="persons" number="26589">Elger, Gordon</collection>
    <thesisPublisher>Technische Hochschule Ingolstadt</thesisPublisher>
    <file>https://opus4.kobv.de/opus4-haw/files/6199/deep_segmentation.pdf</file>
  </doc>
</export-example>
