<?xml version="1.0" encoding="utf-8"?>
<export-example>
  <doc>
    <id>4955</id>
    <completedYear/>
    <publishedYear>2024</publishedYear>
    <thesisYearAccepted/>
    <language>eng</language>
    <pageFirst/>
    <pageLast/>
    <pageNumber>22</pageNumber>
    <edition/>
    <issue>15</issue>
    <volume>24</volume>
    <articleNumber>4777</articleNumber>
    <type>article</type>
    <publisherName>MDPI</publisherName>
    <publisherPlace>Basel</publisherPlace>
    <creatingCorporation/>
    <contributingCorporation/>
    <belongsToBibliography>1</belongsToBibliography>
    <completedDate>2024-07-26</completedDate>
    <publishedDate/>
    <thesisDateAccepted/>
    <title language="eng">The Effect of Annotation Quality on Wear Semantic Segmentation by CNN</title>
    <abstract language="eng">In this work, we investigate the impact of annotation quality and domain expertise on the performance of Convolutional Neural Networks (CNNs) for semantic segmentation of wear on titanium nitride (TiN) and titanium carbonitride (TiCN) coated end mills. Using an innovative measurement system and a customized CNN architecture, we found that domain expertise significantly affects model performance. Annotator 1 achieved maximum mIoU scores of 0.8153 for abnormal wear and 0.7120 for normal wear on TiN datasets, whereas Annotator 3, who had the least expertise, achieved significantly lower scores. Sensitivity to annotation inconsistencies and model hyperparameters was also examined: models for the TiCN datasets showed a higher coefficient of variation (CV) of 16.32%, compared to 8.6% for TiN, owing to the subtler wear characteristics. These findings highlight the need for optimized annotation policies and high-quality images to improve wear segmentation.</abstract>
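    <!--
      Illustrative note: the abstract above reports a coefficient of variation
      (CV) across trained models (16.32% for TiCN vs. 8.6% for TiN). A minimal
      Python sketch of that metric, assuming numpy; the per-run mIoU scores
      below are hypothetical placeholders, not values from the paper.

        import numpy as np

        # Hypothetical mIoU scores from repeated training runs of one model.
        mious = np.array([0.78, 0.81, 0.74, 0.69, 0.83])

        # CV = sample standard deviation / mean, expressed as a percentage.
        cv_percent = mious.std(ddof=1) / mious.mean() * 100.0
        print(f"CV = {cv_percent:.2f}%")
    -->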
    <parentTitle language="eng">Sensors</parentTitle>
    <identifier type="issn">1424-8220</identifier>
    <identifier type="urn">urn:nbn:de:bvb:573-49551</identifier>
    <enrichment key="opus_doi_flag">true</enrichment>
    <enrichment key="opus_import_data">{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2024,7,26]],"date-time":"2024-07-26T00:22:08Z","timestamp":1721953328812},"reference-count":40,"publisher":"MDPI AG","issue":"15","license":[{"start":{"date-parts":[[2024,7,23]],"date-time":"2024-07-23T00:00:00Z","timestamp":1721692800000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"Research and Development (R&amp;D) program \u201cFuE Programm Informations- und Kommunikationstechnik Bayern\u201d of the Free State of Bavaria","award":["IUK578\/001","IUK578\/002"]}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Sensors"],"abstract":"&lt;jats:p&gt;In this work, we investigate the impact of annotation quality and domain expertise on the performance of Convolutional Neural Networks (CNNs) for semantic segmentation of wear on titanium nitride (TiN) and titanium carbonitride (TiCN) coated end mills. Using an innovative measurement system and customized CNN architecture, we found that domain expertise significantly affects model performance. Annotator 1 achieved maximum mIoU scores of 0.8153 for abnormal wear and 0.7120 for normal wear on TiN datasets, whereas Annotator 3 with the lowest expertise achieved significantly lower scores. Sensitivity to annotation inconsistencies and model hyperparameters were examined, revealing that models for TiCN datasets showed a higher coefficient of variation (CV) of 16.32% compared to 8.6% for TiN due to the subtle wear characteristics, highlighting the need for optimized annotation policies and high-quality images to improve wear segmentation.&lt;\/jats:p&gt;","DOI":"10.3390\/s24154777","type":"journal-article","created":{"date-parts":[[2024,7,24]],"date-time":"2024-07-24T14:46:20Z","timestamp":1721832380000},"page":"4777","source":"Crossref","is-referenced-by-count":0,"title":["The Effect of Annotation Quality on Wear Semantic Segmentation by CNN"],"prefix":"10.3390","volume":"24","author":[{"ORCID":"http:\/\/orcid.org\/0000-0003-4065-8467","authenticated-orcid":false,"given":"M\u00fchenad","family":"Bilal","sequence":"first","affiliation":[{"name":"Digital Production, AImotion Bavaria, Technische Hochschule Ingolstadt, 85049 Ingolstadt, Germany"}]},{"given":"Ranadheer","family":"Podishetti","sequence":"additional","affiliation":[{"name":"Digital Production, AImotion Bavaria, Technische Hochschule Ingolstadt, 85049 Ingolstadt, Germany"}]},{"ORCID":"http:\/\/orcid.org\/0000-0003-4845-6579","authenticated-orcid":false,"given":"Leonid","family":"Koval","sequence":"additional","affiliation":[{"name":"Digital Production, AImotion Bavaria, Technische Hochschule Ingolstadt, 85049 Ingolstadt, Germany"}]},{"given":"Mahmoud A.","family":"Gaafar","sequence":"additional","affiliation":[{"name":"Department of Physics, Faculty of Science, Menoufia University, Menoufia 32511, Egypt"},{"name":"Institute of Optical and Electronic Materials, Hamburg University of Technology, 21073 Hamburg, Germany"}]},{"given":"Daniel","family":"Grossmann","sequence":"additional","affiliation":[{"name":"Digital Production, AImotion Bavaria, Technische Hochschule Ingolstadt, 85049 Ingolstadt, Germany"}]},{"given":"Markus","family":"Bregulla","sequence":"additional","affiliation":[{"name":"Digital Production, AImotion Bavaria, Technische Hochschule Ingolstadt, 85049 Ingolstadt, 
Germany"}]}],"member":"1968","published-online":{"date-parts":[[2024,7,23]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","first-page":"929","DOI":"10.1038\/s42256-021-00399-8","article-title":"Designing clinically translatable artificial intelligence systems for high-dimensional medical imaging","volume":"3","author":"Shad","year":"2021","journal-title":"Nat. Mach. Intell."},{"key":"ref_2","doi-asserted-by":"crossref","first-page":"293","DOI":"10.1038\/s42256-020-0181-6","article-title":"Machine learning for COVID-19 needs global collaboration and data-sharing","volume":"2","year":"2020","journal-title":"Nat. Mach. Intell."},{"key":"ref_3","doi-asserted-by":"crossref","first-page":"298","DOI":"10.1038\/s42256-020-0185-2","article-title":"The challenges of deploying artificial intelligence models in a rapidly evolving pandemic","volume":"2","author":"Hu","year":"2020","journal-title":"Nat. Mach. Intell."},{"key":"ref_4","doi-asserted-by":"crossref","first-page":"4","DOI":"10.1148\/radiol.2020192224","article-title":"Preparing medical imaging data for machine learning","volume":"295","author":"Willemink","year":"2020","journal-title":"Radiology"},{"key":"ref_5","unstructured":"Northcutt, C.G., Athalye, A., and Mueller, J. (2021). Pervasive label errors in test sets destabilize machine learning benchmarks. arXiv."},{"key":"ref_6","doi-asserted-by":"crossref","unstructured":"Rottmann, M., and Reese, M. (2023, January 2\u20137). Automated detection of label errors in semantic segmentation datasets via deep learning and uncertainty quantification. Proceedings of the IEEE\/CVF Winter Conference on Applications of Computer Vision 2023, Waikoloa, HI, USA.","DOI":"10.1109\/WACV56688.2023.00323"},{"key":"ref_7","doi-asserted-by":"crossref","first-page":"100336","DOI":"10.1016\/j.patter.2021.100336","article-title":"Data and its (dis)contents: A survey of dataset development and use in machine learning research","volume":"2","author":"Paullada","year":"2021","journal-title":"Patterns"},{"key":"ref_8","doi-asserted-by":"crossref","first-page":"1357","DOI":"10.1007\/s00170-021-07522-4","article-title":"Application of machine vision method in tool wear monitoring","volume":"116","author":"Peng","year":"2021","journal-title":"Int. J. Adv. Manuf. Technol."},{"key":"ref_9","doi-asserted-by":"crossref","first-page":"76532","DOI":"10.1109\/ACCESS.2021.3082690","article-title":"Simulation-Based Data Augmentation for the Quality Inspection of Structural Adhesive With Deep Learning","volume":"9","author":"Peres","year":"2021","journal-title":"IEEE Access"},{"key":"ref_10","unstructured":"(2024, May 10). Survey: 96% Enterprises Encounter Training Data Quality. Available online: https:\/\/www.businesswire.com\/news\/home\/20190523005183\/en\/Survey-96-Enterprises-Encounter-Training-Data-Quality."},{"key":"ref_11","unstructured":"Su, H., Deng, J., and Fei-Fei, L. (2012, January 22\u201326). Crowdsourcing annotations for visual object detection. Proceedings of the Workshops at the Twenty-Sixth AAAI Conference on Artificial Intelligence, Toronto, ON, Canada."},{"key":"ref_12","doi-asserted-by":"crossref","first-page":"208","DOI":"10.1016\/j.measurement.2016.06.006","article-title":"Application of acoustic emission sensor to investigate the frequency of tool wear and plastic deformation in tool condition monitoring","volume":"92","author":"Bhuiyan","year":"2016","journal-title":"Measurement"},{"key":"ref_13","doi-asserted-by":"crossref","unstructured":"Sun, W.H., and Yeh, S.S. (2018). 
Using the Machine Vision Method to Develop an On-machine Insert Condition Monitoring System for Computer Numerical Control Turning Machine Tools. Materials, 11.","DOI":"10.3390\/ma11101977"},{"key":"ref_14","unstructured":"Bilal, M., and Mayer, C. (2023). Objektbeleuchtung, European Patent Office. EP4130720."},{"key":"ref_15","doi-asserted-by":"crossref","first-page":"33173","DOI":"10.1109\/ACCESS.2024.3369417","article-title":"Opportunities and Challenges in Data-Centric AI","volume":"12","author":"Kumar","year":"2024","journal-title":"IEEE Access"},{"key":"ref_16","doi-asserted-by":"crossref","first-page":"89","DOI":"10.1016\/j.eng.2021.05.022","article-title":"Data centric design: A new approach to design of microstructural material systems","volume":"10","author":"Chen","year":"2022","journal-title":"Engineering"},{"key":"ref_17","doi-asserted-by":"crossref","unstructured":"Fang, C., Xu, Y., and Rockmore, D.N. (2013, January 1\u20138). Unbiased metric learning: On the utilization of multiple datasets and web images for softening bias. Proceedings of the IEEE International Conference on Computer Vision 2013, Sydney, NSW, Australia.","DOI":"10.1109\/ICCV.2013.208"},{"key":"ref_18","unstructured":"Recht, B., Roelofs, R., Schmidt, L., and Shankar, V. (2019, January 9\u201315). Do imagenet classifiers generalize to imagenet?. Proceedings of the International Conference on Machine Learning. PMLR 2019, Long Beach, CA, USA."},{"key":"ref_19","unstructured":"Shankar, V., Roelofs, R., Mania, H., Fang, A., Recht, B., and Schmidt, L. (2020, January 13\u201318). Evaluating machine accuracy on imagenet. Proceedings of the International Conference on Machine Learning. PMLR, 2020, Virtual."},{"key":"ref_20","doi-asserted-by":"crossref","unstructured":"van Horn, G., Branson, S., Farrell, R., Haber, S., Barry, J., Ipeirotis, P., Perona, P., and Belongie, S. (2015, January 7\u201312). Building a bird recognition app and large scale dataset with citizen scientists: The fine print in fine-grained dataset collection. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, 2015, Boston, MA, USA.","DOI":"10.1109\/CVPR.2015.7298658"},{"key":"ref_21","unstructured":"Wah, C., Branson, S., Welinder, P., Perona, P., and Belongie, S. (2011). The Caltech-ucsd Birds-200-2011 Dataset, Caltech."},{"key":"ref_22","doi-asserted-by":"crossref","first-page":"24","DOI":"10.1038\/s41591-018-0316-z","article-title":"A guide to deep learning in healthcare","volume":"25","author":"Esteva","year":"2019","journal-title":"Nat. Med."},{"key":"ref_23","doi-asserted-by":"crossref","unstructured":"Cordts, M., Omran, M., Ramos, S., Rehfeld, T., Enzweiler, M., Benenson, R., Franke, U., Roth, S., and Schiele, B. (July, January 26). The cityscapes dataset for semantic urban scene understanding. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition 2016, Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.350"},{"key":"ref_24","doi-asserted-by":"crossref","unstructured":"Taran, V., Gordienko, Y., Rokovyi, A., Alienin, O., and Stirenko, S. (2020). Impact of ground truth annotation quality on performance of semantic image segmentation of traffic conditions. Advances in Computer Science for Engineering and Education II, Springer.","DOI":"10.1007\/978-3-030-16621-2_17"},{"key":"ref_25","doi-asserted-by":"crossref","unstructured":"Zhao, H., Shi, J., Qi, X., Wang, X., and Jia, J. (2017, January 21\u201326). Pyramid scene parsing network. 
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition 2017, Honolulu, HI, USA.","DOI":"10.1109\/CVPR.2017.660"},{"key":"ref_26","doi-asserted-by":"crossref","unstructured":"Wu, X., Liu, Y., Zhou, X., and Mou, A. (2019). Automatic identification of tool wear based on convolutional neural network in face milling process. Sensors, 19.","DOI":"10.3390\/s19183817"},{"key":"ref_27","doi-asserted-by":"crossref","first-page":"534","DOI":"10.1016\/j.ifacol.2022.04.249","article-title":"Deep learning and rule-based image processing pipeline for automated metal cutting tool wear detection and measurement","volume":"55","author":"Holst","year":"2022","journal-title":"IFAC-PapersOnLine"},{"key":"ref_28","doi-asserted-by":"crossref","first-page":"947","DOI":"10.1016\/j.promfg.2020.05.134","article-title":"Digital image processing with deep learning for automated cutting tool wear detection","volume":"48","author":"Bergs","year":"2020","journal-title":"Procedia Manuf."},{"key":"ref_29","doi-asserted-by":"crossref","unstructured":"Lutz, B., Kisskalt, D., Regulin, D., Hauser, T., and Franke, J. (2021, January 10\u201312). Material Identification for Smart Manufacturing Systems: A Review. Proceedings of the 2021 4th IEEE International Conference on Industrial Cyber-Physical Systems (ICPS), Victoria, BC, Canada.","DOI":"10.1109\/ICPS49255.2021.9468191"},{"key":"ref_30","first-page":"234","article-title":"U-Net: Convolutional Networks for Biomedical Image Segmentation","volume":"Volume 9351","author":"Navab","year":"2015","journal-title":"Medical Image Computing and Computer-Assisted Intervention\u2014MICCAI 2015"},{"key":"ref_31","doi-asserted-by":"crossref","first-page":"386","DOI":"10.1109\/TPAMI.2018.2844175","article-title":"Mask R-CNN","volume":"42","author":"He","year":"2020","journal-title":"IEEE Trans. Pattern Anal. Mach. Intell."},{"key":"ref_32","unstructured":"(2024, May 01). Allen Goodman, Anne Carpenter, Elizabeth Park, jlefman-nvidia, Josette BoozAllen, Kyle, Maggie, Nilofer, Peter Sedivec, Will Cukierski. 2018 Data Science Bowl. Kaggle. Available online: https:\/\/kaggle.com\/competitions\/data-science-bowl-2018."},{"key":"ref_33","unstructured":"Jacobkie (2024, June 12). Data Science Bowl 2nd Place Solution. Available online: https:\/\/github.com\/jacobkie\/2018DSB."},{"key":"ref_34","doi-asserted-by":"crossref","first-page":"44400","DOI":"10.1109\/ACCESS.2020.2976432","article-title":"Mask R-CNN-Based Detection and Segmentation for Pulmonary Nodule 3D Visualization Diagnosis","volume":"8","author":"Cai","year":"2020","journal-title":"IEEE Access"},{"key":"ref_35","doi-asserted-by":"crossref","first-page":"1423","DOI":"10.1007\/s10845-017-1334-2","article-title":"A novel integrated tool condition monitoring system","volume":"30","author":"Jain","year":"2019","journal-title":"J. Intell. Manuf."},{"key":"ref_36","doi-asserted-by":"crossref","first-page":"3276","DOI":"10.1007\/s10489-021-02542-9","article-title":"An improved U-Net method for the semantic segmentation of remote sensing images","volume":"52","author":"Su","year":"2022","journal-title":"Appl. 
Intell."},{"key":"ref_37","doi-asserted-by":"crossref","first-page":"209","DOI":"10.1016\/S0263-2241(00)00014-2","article-title":"Reliable tool wear monitoring by optimized image and illumination control in machine vision","volume":"28","author":"Pfeifer","year":"2000","journal-title":"Measurement"},{"key":"ref_38","doi-asserted-by":"crossref","first-page":"157","DOI":"10.1007\/s11263-007-0090-8","article-title":"LabelMe: A database and web-based tool for image annotation","volume":"77","author":"Russell","year":"2008","journal-title":"Int. J. Comput. Vis."},{"key":"ref_39","doi-asserted-by":"crossref","unstructured":"Grigoriev, S.N., Migranov, M.S., Melnik, Y.A., Okunkova, A.A., Fedorov, S.V., Gurin, V.D., and Volosova, M.A. (2021). Application of adaptive materials and coatings to increase cutting tool performance: Efficiency in the case of composite powder high speed steel. Coatings, 11.","DOI":"10.3390\/coatings11070855"},{"key":"ref_40","doi-asserted-by":"crossref","first-page":"215","DOI":"10.1016\/j.promfg.2020.10.031","article-title":"Benchmark of Automated Machine Learning with State-of-the-Art Image Segmentation Algorithms for Tool Condition Monitoring","volume":"51","author":"Lutz","year":"2020","journal-title":"Procedia Manuf."}],"container-title":["Sensors"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/1424-8220\/24\/15\/4777\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,7,25]],"date-time":"2024-07-25T15:58:48Z","timestamp":1721923128000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/1424-8220\/24\/15\/4777"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024,7,23]]},"references-count":40,"journal-issue":{"issue":"15","published-online":{"date-parts":[[2024,8]]}},"alternative-id":["s24154777"],"URL":"http:\/\/dx.doi.org\/10.3390\/s24154777","relation":{},"ISSN":["1424-8220"],"issn-type":[{"value":"1424-8220","type":"electronic"}],"subject":[],"published":{"date-parts":[[2024,7,23]]}}}</enrichment>
    <enrichment key="local_crossrefDocumentType">journal-article</enrichment>
    <enrichment key="local_crossrefLicence">https://creativecommons.org/licenses/by/4.0/</enrichment>
    <enrichment key="local_import_origin">crossref</enrichment>
    <enrichment key="local_doiImportPopulated">PersonAuthorFirstName_1,PersonAuthorLastName_1,PersonAuthorIdentifierOrcid_1,PersonAuthorFirstName_2,PersonAuthorLastName_2,PersonAuthorFirstName_3,PersonAuthorLastName_3,PersonAuthorIdentifierOrcid_3,PersonAuthorFirstName_4,PersonAuthorLastName_4,PersonAuthorFirstName_5,PersonAuthorLastName_5,PersonAuthorFirstName_6,PersonAuthorLastName_6,PublisherName,TitleMain_1,Language,TitleAbstract_1,TitleParent_1,ArticleNumber,Issue,Volume,CompletedYear,IdentifierIssn,Enrichmentlocal_crossrefLicence</enrichment>
    <enrichment key="opus.source">doi-import</enrichment>
    <enrichment key="THI_relatedIdentifier">https://doi.org/10.3390/s24154777</enrichment>
    <enrichment key="THI_articleversion">published</enrichment>
    <enrichment key="THI_openaccess">ja</enrichment>
    <enrichment key="THI_review">peer-review</enrichment>
    <enrichment key="opus.doi.autoCreate">false</enrichment>
    <enrichment key="opus.urn.autoCreate">true</enrichment>
    <licence>Creative Commons BY 4.0</licence>
    <author>
      <first_name>Mühenad</first_name>
      <last_name>Bilal</last_name>
    </author>
    <author>
      <first_name>Ranadheer</first_name>
      <last_name>Podishetti</last_name>
    </author>
    <author>
      <first_name>Leonid</first_name>
      <last_name>Koval</last_name>
    </author>
    <author>
      <first_name>Mahmoud A.</first_name>
      <last_name>Gaafar</last_name>
    </author>
    <author>
      <first_name>Daniel</first_name>
      <last_name>Großmann</last_name>
    </author>
    <author>
      <first_name>Markus</first_name>
      <last_name>Bregulla</last_name>
    </author>
    <collection role="open_access" number="">open_access</collection>
    <collection role="institutes" number="19310">Fakultät Wirtschaftsingenieurwesen</collection>
    <collection role="institutes" number="19379">AImotion Bavaria</collection>
    <collection role="persons" number="26836">Großmann, Daniel</collection>
    <collection role="persons" number="23276">Bregulla, Markus</collection>
    <thesisPublisher>Technische Hochschule Ingolstadt</thesisPublisher>
    <file>https://opus4.kobv.de/opus4-haw/files/4955/sensors-24-04777.pdf</file>
  </doc>
  <doc>
    <id>5805</id>
    <completedYear/>
    <publishedYear>2025</publishedYear>
    <thesisYearAccepted/>
    <language>eng</language>
    <pageFirst/>
    <pageLast/>
    <pageNumber>24</pageNumber>
    <edition/>
    <issue>5</issue>
    <volume>25</volume>
    <articleNumber>1575</articleNumber>
    <type>article</type>
    <publisherName>MDPI</publisherName>
    <publisherPlace>Basel</publisherPlace>
    <creatingCorporation/>
    <contributingCorporation/>
    <belongsToBibliography>1</belongsToBibliography>
    <completedDate>2025-04-04</completedDate>
    <publishedDate/>
    <thesisDateAccepted/>
    <title language="eng">CNN-Based Classification of Optically Critical Cutting Tools with Complex Geometry: New Insights for CNN-Based Classification Tasks</title>
    <abstract language="eng">Sustainability concerns have increasingly emphasized the importance of recycling and repairing materials. Cutting tools, such as milling cutters and drills, play a crucial role due to the high demands placed on products used in CNC machining. As a result, the repair and regrinding of these tools have become more essential. The geometric differences among machining tools determine their specific applications: twist drills have spiral flutes and pointed cutting edges designed for drilling, while end mills feature multiple sharp edges around the shank, making them suitable for milling. Taps and form cutters exhibit unique geometries and cutting-edge shapes, enabling the creation of complex profiles. However, measuring and classifying these tools for repair or regrinding is challenging due to their optical properties and coatings. This research investigates how lighting conditions affect the classification of tools for regrinding, addressing the shortage of skilled workers and the increasing need for automation. This paper compares different training strategies on two unique tool-specific datasets, each containing 36 distinct tools recorded under two lighting conditions: direct diffuse ring lighting and normal daylight. Furthermore, Grad-CAM heatmap analysis provides new insights into relevant classification features.</abstract>
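    <!--
      Illustrative note: the abstract above relies on Grad-CAM heatmap
      analysis. A minimal PyTorch sketch of that technique, not the authors'
      code; the backbone (resnet50) and random input are illustrative
      stand-ins. Grad-CAM weights a conv layer's activations by the spatially
      pooled gradients of the class score, then applies ReLU.

        import torch
        import torchvision.models as models

        model = models.resnet50(weights=models.ResNet50_Weights.IMAGENET1K_V2).eval()
        acts, grads = {}, {}
        layer = model.layer4[-1]  # last conv block; any conv layer works
        layer.register_forward_hook(lambda m, i, o: acts.update(a=o))
        layer.register_full_backward_hook(lambda m, gi, go: grads.update(g=go[0]))

        x = torch.randn(1, 3, 224, 224)  # stand-in for a tool image
        score = model(x)[0].max()        # logit of the top class
        score.backward()

        w = grads["g"].mean(dim=(2, 3), keepdim=True)  # pooled gradients
        cam = torch.relu((w * acts["a"]).sum(dim=1))   # weighted sum + ReLU
        cam = cam / cam.max()                          # normalize to [0, 1]
    -->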
    <parentTitle language="eng">Sensors</parentTitle>
    <identifier type="issn">1424-8220</identifier>
    <identifier type="urn">urn:nbn:de:bvb:573-58058</identifier>
    <enrichment key="opus_doi_flag">true</enrichment>
    <enrichment key="opus_import_data">{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,3,5]],"date-time":"2025-03-05T05:39:07Z","timestamp":1741153147036,"version":"3.38.0"},"reference-count":33,"publisher":"MDPI AG","issue":"5","license":[{"start":{"date-parts":[[2025,3,4]],"date-time":"2025-03-04T00:00:00Z","timestamp":1741046400000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by\/4.0\/"}],"funder":[{"name":"Research and Development Program \u201cForschung und Entwicklung (FUE) Programm Informations- und Kommunikationstechnik Bayern\u201d of the Free State of Bavaria","award":["IUK578\/001","IUK578\/002"]},{"name":"WHM-Herion Linner GmbH"},{"name":"Open Access Publication Fund of Technische Hochschule Ingolstadt"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["Sensors"],"abstract":"&lt;jats:p&gt;Sustainability has increasingly emphasized the importance of recycling and repairing materials. Cutting tools, such as milling cutters and drills, play a crucial role due to the high demands placed on products used in CNC machining. As a result, the repair and regrinding of these tools have become more essential. The geometric differences among machining tools determine their specific applications: twist drills have spiral flutes and pointed cutting edges designed for drilling, while end mills feature multiple sharp edges around the shank, making them suitable for milling. Taps and form cutters exhibit unique geometries and cutting-edge shapes, enabling the creation of complex profiles. However, measuring and classifying these tools for repair or regrinding is challenging due to their optical properties and coatings. This research investigates how lighting conditions affect the classification of tools for regrinding, addressing the shortage of skilled workers and the increasing need for automation. This paper compares different training strategies on two unique tool-specific datasets, each containing 36 distinct tools recorded under two lighting conditions\u2014direct diffuse ring lighting and normal daylight. 
Furthermore, Grad-CAM heatmap analysis provides new insights into relevant classification features.&lt;\/jats:p&gt;","DOI":"10.3390\/s25051575","type":"journal-article","created":{"date-parts":[[2025,3,4]],"date-time":"2025-03-04T14:01:33Z","timestamp":1741096893000},"page":"1575","source":"Crossref","is-referenced-by-count":0,"title":["CNN-Based Classification of Optically Critical Cutting Tools with Complex Geometry: New Insights for CNN-Based Classification Tasks"],"prefix":"10.3390","volume":"25","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4065-8467","authenticated-orcid":false,"given":"M\u00fchenad","family":"Bilal","sequence":"first","affiliation":[{"name":"Application Cluster \u201cDigital Production\u201d Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt (THI), Esplanade 10, 85049 Ingolstadt, Germany"}]},{"given":"Ranadheer","family":"Podishetti","sequence":"additional","affiliation":[{"name":"Application Cluster \u201cDigital Production\u201d Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt (THI), Esplanade 10, 85049 Ingolstadt, Germany"}]},{"given":"Tangirala Sri","family":"Girish","sequence":"additional","affiliation":[{"name":"Application Cluster \u201cDigital Production\u201d Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt (THI), Esplanade 10, 85049 Ingolstadt, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7388-5757","authenticated-orcid":false,"given":"Daniel","family":"Grossmann","sequence":"additional","affiliation":[{"name":"Application Cluster \u201cDigital Production\u201d Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt (THI), Esplanade 10, 85049 Ingolstadt, Germany"}]},{"given":"Markus","family":"Bregulla","sequence":"additional","affiliation":[{"name":"Application Cluster \u201cDigital Production\u201d Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt (THI), Esplanade 10, 85049 Ingolstadt, Germany"}]}],"member":"1968","published-online":{"date-parts":[[2025,3,4]]},"reference":[{"key":"ref_1","doi-asserted-by":"crossref","unstructured":"Szegedy, C., Vanhoucke, V., Ioffe, S., Shlens, J., and Wojna, Z. (2016, January 27\u201330). Rethinking the inception architecture for computer vision. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.308"},{"key":"ref_2","unstructured":"Wang, N., and Yeung, D.-Y. (2013, January 5\u201310). Learning a deep compact image representation for visual tracking. Proceedings of the Advances in Neural Information Processing Systems, Lake Tahoe, NV, USA."},{"key":"ref_3","doi-asserted-by":"crossref","unstructured":"Dong, C., Loy, C.C., He, K., and Tang, X. (2014). Learning a deep convolutional network for image super-resolution. Computer Vision\u2013ECCV 2014: Proceedings of the 13th European Conference, Zurich, Switzerland, 6\u201312 September 2014; Part IV, Springer.","DOI":"10.1007\/978-3-319-10593-2_13"},{"key":"ref_4","doi-asserted-by":"crossref","unstructured":"Girshick, R., Donahue, J., Darrell, T., and Malik, J. (2014, January 23\u201328). Rich feature hierarchies for accurate object detection and semantic segmentation. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Columbus, OH, USA.","DOI":"10.1109\/CVPR.2014.81"},{"key":"ref_5","doi-asserted-by":"crossref","unstructured":"Long, J., Shelhamer, E., and Darrell, T. (2015, January 7\u201312). Fully convolutional networks for semantic segmentation. 
Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Boston, MA, USA.","DOI":"10.1109\/CVPR.2015.7298965"},{"key":"ref_6","doi-asserted-by":"crossref","unstructured":"Karpathy, A., Toderici, G., Shetty, S., Leung, T., Sukthankar, R., and Fei-Fei, L. (2014, January 23\u201328). Large-scale video classification with convolutional neural networks. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Columbus, OH, USA.","DOI":"10.1109\/CVPR.2014.223"},{"key":"ref_7","doi-asserted-by":"crossref","unstructured":"Wu, X., Liu, Y., Zhou, X., and Mou, A. (2019). Automatic identification of tool wear based on convolutional neural network in face milling process. Sensors, 19.","DOI":"10.3390\/s19183817"},{"key":"ref_8","doi-asserted-by":"crossref","first-page":"661","DOI":"10.1007\/s40684-021-00343-6","article-title":"State of the art in defect detection based on machine vision","volume":"9","author":"Ren","year":"2022","journal-title":"Int. J. Precis. Eng.-Manuf.-Green Technol."},{"key":"ref_9","doi-asserted-by":"crossref","unstructured":"Wei, W., Yin, J., Zhang, J., Zhang, H., and Lu, Z. (2021). Wear and breakage detection of integral spiral end milling cutters based on machine vision. Materials, 14.","DOI":"10.3390\/ma14195690"},{"key":"ref_10","doi-asserted-by":"crossref","first-page":"305","DOI":"10.5220\/0010781800003124","article-title":"High-resolution mask R-CNN-based damage detection on titanium nitride-coated milling tools for condition monitoring using a new illumination technique","volume":"Volume 5","author":"Bilal","year":"2022","journal-title":"Proceedings of the 17th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications (VISIGRAPP 2022)"},{"key":"ref_11","unstructured":"Osten, W., Nikolaev, D., and Zhou, J. (2021, January 8\u201312). Image-based damage detection on TiN-coated milling tools using a multi-light scattering illumination technique. Proceedings of the Fourteenth International Conference on Machine Vision (ICMV 2021), Rome, Italy."},{"key":"ref_12","unstructured":"B\u0103dic\u0103, C., Treur, J., Benslimane, D., Hnatkowska, B., and Kr\u00f3tkiewicz, M. (2022). Damage detection of coated milling tools using images captured by cylindrical-shaped enclosure measurement setup. Advances in Computational Collective Intelligence, Springer International Publishing."},{"key":"ref_13","doi-asserted-by":"crossref","first-page":"124282","DOI":"10.1109\/ACCESS.2024.3454692","article-title":"Automatized end mill wear inspection using a novel illumination unit and convolutional neural network","volume":"12","author":"Bilal","year":"2024","journal-title":"IEEE Access"},{"key":"ref_14","doi-asserted-by":"crossref","first-page":"2278","DOI":"10.1109\/5.726791","article-title":"Gradient-based learning applied to document recognition","volume":"86","author":"Lecun","year":"1998","journal-title":"Proc. IEEE"},{"key":"ref_15","unstructured":"Krizhevsky, A., Sutskever, I., and Hinton, G.E. (2012, January 3\u20136). ImageNet classification with deep convolutional neural networks. Proceedings of the Advances in Neural Information Processing Systems, Lake Tahoe, NV, USA."},{"key":"ref_16","unstructured":"Simonyan, K., and Zisserman, A. (2014). Very deep convolutional networks for large-scale image recognition. arXiv."},{"key":"ref_17","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., and Sun, J. (2015). Deep residual learning for image recognition. 
arXiv.","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref_18","doi-asserted-by":"crossref","unstructured":"He, K., Zhang, X., Ren, S., and Sun, J. (2016, January 27\u201330). Deep residual learning for image recognition. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Las Vegas, NV, USA.","DOI":"10.1109\/CVPR.2016.90"},{"key":"ref_19","doi-asserted-by":"crossref","unstructured":"Huang, G., Liu, Z., Maaten, L.V.D., and Weinberger, K.Q. (2017, January 21\u201326). Densely connected convolutional networks. Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, Honolulu, HI, USA.","DOI":"10.1109\/CVPR.2017.243"},{"key":"ref_20","first-page":"6105","article-title":"EfficientNet: Rethinking model scaling for convolutional neural networks","volume":"97","author":"Tan","year":"2019","journal-title":"Int. Conf. Mach. Learn."},{"key":"ref_21","unstructured":"Dosovitskiy, A., Beyer, L., Kolesnikov, A., Weissenborn, D., Zhai, X., Unterthiner, T., Dehghani, M., Minderer, M., Heigold, G., and Gelly, S. (2020). An image is worth 16 \u00d7 16 words: Transformers for image recognition at scale. arXiv."},{"key":"ref_22","unstructured":"Krizhevsky, A., and Hinton, G. (2009). Learning Multiple Layers of Features from Tiny Images, University of Toronto. Technichal Report."},{"key":"ref_23","doi-asserted-by":"crossref","unstructured":"Deng, J., Dong, W., Socher, R., Li, L.J., Li, K., and Fei-Fei, L. (2009, January 20\u201325). ImageNet: A large-scale hierarchical image database. Proceedings of the 2009 IEEE Conference on Computer Vision and Pattern Recognition, Miami, FL, USA.","DOI":"10.1109\/CVPR.2009.5206848"},{"key":"ref_24","unstructured":"Xiao, H., Rasul, K., and Vollgraf, R. (2017). Fashion-MNIST: A novel image dataset for benchmarking machine learning algorithms. arXiv."},{"key":"ref_25","unstructured":"Tan, M., and Le, Q.V. (2019). EfficientNet: Rethinking model scaling for convolutional neural networks. arXiv."},{"key":"ref_26","doi-asserted-by":"crossref","first-page":"18","DOI":"10.1016\/j.isprsjprs.2023.01.014","article-title":"Current trends in deep learning for Earth Observation: An open-source benchmark arena for image classification","volume":"197","author":"Dimitrovski","year":"2023","journal-title":"ISPRS J. Photogramm. Remote Sens."},{"key":"ref_27","first-page":"3","article-title":"Tiny ImageNet visual recognition challenge","volume":"7","author":"Le","year":"2015","journal-title":"CS 231N"},{"key":"ref_28","doi-asserted-by":"crossref","unstructured":"Kornblith, S., Shlens, J., and Le, Q.V. (2019, January 15\u201320). Do better ImageNet models transfer better?. Proceedings of the IEEE\/CVF Conference on Computer Vision and Pattern Recognition, Long Beach, CA, USA.","DOI":"10.1109\/CVPR.2019.00277"},{"key":"ref_29","unstructured":"Yosinski, J., Clune, J., Bengio, Y., and Lipson, H. (2014, January 8\u201313). How transferable are features in deep neural networks?. Proceedings of the Advances in Neural Information Processing Systems, Montreal, QC, Canada."},{"key":"ref_30","unstructured":"Raghu, M., Zhang, C., Kleinberg, J., and Bengio, S. (2019, January 8\u201314). Transfusion: Understanding transfer learning for medical imaging. 
Proceedings of the Advances in Neural Information Processing Systems, Vancouver, BC, Canada."},{"key":"ref_31","doi-asserted-by":"crossref","first-page":"947","DOI":"10.1016\/j.promfg.2020.05.134","article-title":"Digital image processing with deep learning for automated cutting tool wear detection","volume":"48","author":"Bergs","year":"2020","journal-title":"Procedia Manuf."},{"key":"ref_32","doi-asserted-by":"crossref","first-page":"657","DOI":"10.1016\/j.jmsy.2022.04.011","article-title":"Machine learning classification of surface fracture in ultra-precision diamond turning using CSI intensity map images","volume":"64","author":"Nogueira","year":"2022","journal-title":"J. Manuf. Syst."},{"key":"ref_33","doi-asserted-by":"crossref","unstructured":"Ma, W.-C., Chao, S.-H., Chen, B.-Y., Chang, C.-F., Ouhyoung, M., and Nishita, T. (2004). An efficient representation of complex materials for real-time rendering. VRST\u201904: Proceedings of the ACM Symposium on Virtual Reality Software and Technology, Hong Kong, China, 10\u201312 November 2004, Association for Computing Machinery.","DOI":"10.1145\/1077534.1077563"}],"container-title":["Sensors"],"original-title":[],"language":"en","link":[{"URL":"https:\/\/www.mdpi.com\/1424-8220\/25\/5\/1575\/pdf","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2025,3,4]],"date-time":"2025-03-04T14:02:46Z","timestamp":1741096966000},"score":1,"resource":{"primary":{"URL":"https:\/\/www.mdpi.com\/1424-8220\/25\/5\/1575"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2025,3,4]]},"references-count":33,"journal-issue":{"issue":"5","published-online":{"date-parts":[[2025,3]]}},"alternative-id":["s25051575"],"URL":"https:\/\/doi.org\/10.3390\/s25051575","relation":{},"ISSN":["1424-8220"],"issn-type":[{"value":"1424-8220","type":"electronic"}],"subject":[],"published":{"date-parts":[[2025,3,4]]}}}</enrichment>
    <enrichment key="local_crossrefDocumentType">journal-article</enrichment>
    <enrichment key="local_crossrefLicence">https://creativecommons.org/licenses/by/4.0/</enrichment>
    <enrichment key="local_import_origin">crossref</enrichment>
    <enrichment key="local_doiImportPopulated">PersonAuthorFirstName_1,PersonAuthorLastName_1,PersonAuthorIdentifierOrcid_1,PersonAuthorFirstName_2,PersonAuthorLastName_2,PersonAuthorFirstName_3,PersonAuthorLastName_3,PersonAuthorFirstName_4,PersonAuthorLastName_4,PersonAuthorIdentifierOrcid_4,PersonAuthorFirstName_5,PersonAuthorLastName_5,PublisherName,TitleMain_1,Language,TitleAbstract_1,TitleParent_1,ArticleNumber,Issue,Volume,CompletedYear,IdentifierIssn,Enrichmentlocal_crossrefLicence</enrichment>
    <enrichment key="opus.source">doi-import</enrichment>
    <enrichment key="THI_relatedIdentifier">https://doi.org/10.3390/s25051575</enrichment>
    <enrichment key="THI_articleversion">published</enrichment>
    <enrichment key="THI_openaccess">ja</enrichment>
    <enrichment key="THI_review">peer-review</enrichment>
    <licence>Creative Commons BY 4.0</licence>
    <author>
      <first_name>Mühenad</first_name>
      <last_name>Bilal</last_name>
    </author>
    <author>
      <first_name>Ranadheer</first_name>
      <last_name>Podishetti</last_name>
    </author>
    <author>
      <first_name>Sri Girish</first_name>
      <last_name>Tangirala</last_name>
    </author>
    <author>
      <first_name>Daniel</first_name>
      <last_name>Großmann</last_name>
    </author>
    <author>
      <first_name>Markus</first_name>
      <last_name>Bregulla</last_name>
    </author>
    <collection role="open_access" number="">open_access</collection>
    <collection role="institutes" number="19310">Fakultät Wirtschaftsingenieurwesen</collection>
    <collection role="institutes" number="19379">AImotion Bavaria</collection>
    <collection role="persons" number="26836">Großmann, Daniel</collection>
    <collection role="persons" number="23276">Bregulla, Markus</collection>
    <thesisPublisher>Technische Hochschule Ingolstadt</thesisPublisher>
    <file>https://opus4.kobv.de/opus4-haw/files/5805/sensors-25-01575.pdf</file>
  </doc>
  <doc>
    <id>5966</id>
    <completedYear/>
    <publishedYear>2025</publishedYear>
    <thesisYearAccepted/>
    <language>eng</language>
    <pageFirst>96400</pageFirst>
    <pageLast>96422</pageLast>
    <pageNumber/>
    <edition/>
    <issue/>
    <volume>13</volume>
    <articleNumber/>
    <type>article</type>
    <publisherName>IEEE</publisherName>
    <publisherPlace>New York</publisherPlace>
    <creatingCorporation/>
    <contributingCorporation/>
    <belongsToBibliography>1</belongsToBibliography>
    <completedDate>2025-06-10</completedDate>
    <publishedDate/>
    <thesisDateAccepted/>
    <title language="eng">Benchmarking CNN Architectures for Tool Classification: Evaluating CNN Performance on a Unique Dataset Generated by Novel Image Acquisition System</title>
    <abstract language="eng">In this study, we introduce the ToolSurface-144 dataset, presented here for the first time. It comprises four subsets (Full R, Full S, Top R, and Top S), each containing 144 tool classes captured under varying illumination conditions and fields of view. The data were acquired with a newly developed, patented imaging approach, which we compare against conventional diffuse ring illumination to assess its effectiveness for evaluating state-of-the-art convolutional neural networks. This enabled a more targeted investigation of the role of global shape characteristics, such as silhouettes, versus localized features, such as the tool face, cutting edges, and delicate geometric structures, under different training strategies. We evaluate six state-of-the-art convolutional neural networks (AlexNet, DenseNet161, EfficientNet-B0, ResNet152, ResNet50, and VGG16) using three training strategies: fine-tuning, freezing of pre-trained layers, and training from scratch. The results show that EfficientNet-B0 consistently achieved the highest classification accuracy in nearly all experiments and datasets; with the fine-tuning strategy in particular, it reached 99% accuracy in tool classification. ResNet50 benefited greatly from fine-tuning and freezing, achieving a significant performance increase compared to training from scratch. In contrast, ResNet152, AlexNet, and VGG16 consistently showed poor classification performance, indicating difficulties with learning and generalization. The results further show that diffuse illumination and complete tool views provide the best classification conditions, while restricted image sections with homogeneous illumination negatively affect model performance. Among the evaluated training strategies, fine-tuning proved the most efficient method for developing CNN models for tool classification.</abstract>
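    <!--
      Illustrative note: the abstract above compares fine-tuning, freezing of
      pre-trained layers, and training from scratch. A minimal PyTorch sketch
      of the three strategies with torchvision's EfficientNet-B0; the class
      count 144 matches the dataset, but the setup itself is an assumed
      illustration, not the authors' training code.

        import torch.nn as nn
        import torchvision.models as models

        def build(strategy: str, num_classes: int = 144) -> nn.Module:
            pretrained = strategy in ("finetune", "freeze")
            w = models.EfficientNet_B0_Weights.IMAGENET1K_V1 if pretrained else None
            model = models.efficientnet_b0(weights=w)
            if strategy == "freeze":  # keep the pre-trained backbone fixed
                for p in model.parameters():
                    p.requires_grad = False
            # Replace the classifier head; its new weights are always trainable.
            model.classifier[1] = nn.Linear(model.classifier[1].in_features, num_classes)
            return model
    -->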
    <parentTitle language="eng">IEEE Access</parentTitle>
    <identifier type="issn">2169-3536</identifier>
    <identifier type="urn">urn:nbn:de:bvb:573-59669</identifier>
    <enrichment key="opus_doi_flag">true</enrichment>
    <enrichment key="local_crossrefDocumentType">journal-article</enrichment>
    <enrichment key="local_crossrefLicence">https://creativecommons.org/licenses/by/4.0/legalcode</enrichment>
    <enrichment key="local_import_origin">crossref</enrichment>
    <enrichment key="local_doiImportPopulated">PersonAuthorFirstName_1,PersonAuthorLastName_1,PersonAuthorIdentifierOrcid_1,PersonAuthorFirstName_2,PersonAuthorLastName_2,PersonAuthorFirstName_3,PersonAuthorLastName_3,PersonAuthorIdentifierOrcid_3,PersonAuthorFirstName_4,PersonAuthorLastName_4,PublisherName,TitleMain_1,TitleParent_1,PageNumber,PageFirst,PageLast,Volume,CompletedYear,IdentifierIssn,Enrichmentlocal_crossrefLicence</enrichment>
    <enrichment key="opus.source">doi-import</enrichment>
    <enrichment key="THI_relatedIdentifier">https://doi.org/10.1109/ACCESS.2025.3574785</enrichment>
    <enrichment key="THI_articleversion">published</enrichment>
    <enrichment key="THI_openaccess">ja</enrichment>
    <enrichment key="THI_review">peer-review</enrichment>
    <licence>Creative Commons BY 4.0</licence>
    <author>
      <first_name>Mühenad</first_name>
      <last_name>Bilal</last_name>
    </author>
    <author>
      <first_name>Ranadheer</first_name>
      <last_name>Podishetti</last_name>
    </author>
    <author>
      <first_name>Daniel</first_name>
      <last_name>Großmann</last_name>
    </author>
    <author>
      <first_name>Markus</first_name>
      <last_name>Bregulla</last_name>
    </author>
    <collection role="open_access" number="">open_access</collection>
    <collection role="institutes" number="19310">Fakultät Wirtschaftsingenieurwesen</collection>
    <collection role="institutes" number="19379">AImotion Bavaria</collection>
    <collection role="persons" number="26836">Großmann, Daniel</collection>
    <thesisPublisher>Technische Hochschule Ingolstadt</thesisPublisher>
    <file>https://opus4.kobv.de/opus4-haw/files/5966/Benchmarking_CNN_Architectures.pdf</file>
  </doc>
  <doc>
    <id>5816</id>
    <completedYear/>
    <publishedYear>2024</publishedYear>
    <thesisYearAccepted/>
    <language>eng</language>
    <pageFirst>124282</pageFirst>
    <pageLast>124297</pageLast>
    <pageNumber/>
    <edition/>
    <issue/>
    <volume>12</volume>
    <articleNumber/>
    <type>article</type>
    <publisherName>IEEE</publisherName>
    <publisherPlace>New York</publisherPlace>
    <creatingCorporation/>
    <contributingCorporation/>
    <belongsToBibliography>1</belongsToBibliography>
    <completedDate>2025-04-09</completedDate>
    <publishedDate/>
    <thesisDateAccepted/>
    <title language="eng">Automatized End Mill Wear Inspection Using a Novel Illumination Unit and Convolutional Neural Network</title>
    <abstract language="eng">Ensuring cutting tools are in optimal condition is essential for achieving peak machining performance, given their direct impact on both workpiece quality and process efficiency. However, accurately assessing wear on end mills, especially those with complex geometries, poses a significant challenge due to their reflective surfaces and varied wear patterns. Presented here is a novel method that addresses this challenge by employing a customized illumination unit in conjunction with a convolutional neural network (CNN) for end mill wear analysis. This approach uses the specially designed illumination unit to capture high-quality images, enabling precise examination of material wear on helically shaped end mills. Notably, the method is tailored to illuminating reflective surfaces and represents a pioneering application in the realm of wear testing. We validate the viability of this approach by employing CNN-based models to segment wear on complex-shaped end mills coated with titanium carbonitride (TiCN) and titanium nitride (TiN). We achieved remarkable mean Intersection over Union (mIoU) results in wear detection on a test dataset: 0.99 for tool segmentation, 0.78 for abnormal wear, and 0.71 for normal wear segmentation.</abstract>
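    <!--
      Illustrative note: the abstract above reports per-class mean Intersection
      over Union (mIoU) scores for segmentation masks. A minimal numpy sketch
      of the metric; the tiny masks are made-up toy data, not results from the
      paper.

        import numpy as np

        def miou(pred, gt, classes):
            ious = []
            for c in classes:
                p, g = pred == c, gt == c
                union = np.logical_or(p, g).sum()
                if union:  # skip classes absent from both masks
                    ious.append(np.logical_and(p, g).sum() / union)
            return float(np.mean(ious))

        pred = np.array([[0, 1, 1], [0, 2, 2], [0, 0, 2]])  # toy prediction
        gt = np.array([[0, 1, 1], [0, 1, 2], [0, 0, 2]])    # toy ground truth
        print(miou(pred, gt, classes=(0, 1, 2)))
    -->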
    <parentTitle language="eng">IEEE Access</parentTitle>
    <identifier type="issn">2169-3536</identifier>
    <identifier type="urn">urn:nbn:de:bvb:573-58160</identifier>
    <enrichment key="opus_doi_flag">true</enrichment>
    <enrichment key="opus_import_data">{"status":"ok","message-type":"work","message-version":"1.0.0","message":{"indexed":{"date-parts":[[2025,2,21]],"date-time":"2025-02-21T20:22:53Z","timestamp":1740169373850,"version":"3.37.3"},"reference-count":42,"publisher":"Institute of Electrical and Electronics Engineers (IEEE)","license":[{"start":{"date-parts":[[2024,1,1]],"date-time":"2024-01-01T00:00:00Z","timestamp":1704067200000},"content-version":"vor","delay-in-days":0,"URL":"https:\/\/creativecommons.org\/licenses\/by-nc-nd\/4.0\/"}],"funder":[{"name":"Research and Development Program \u201cForschung und Entwicklung (FUE) Programm Informations\u2014und Kommunikationstechnik Bayern\u201d of the Free State of Bavaria","award":["IUK578\/001","IUK578\/002"]},{"name":"Open Access Publication Fund of Technische Hochschule Ingolstadt"}],"content-domain":{"domain":[],"crossmark-restriction":false},"short-container-title":["IEEE Access"],"published-print":{"date-parts":[[2024]]},"DOI":"10.1109\/access.2024.3454692","type":"journal-article","created":{"date-parts":[[2024,9,5]],"date-time":"2024-09-05T18:34:18Z","timestamp":1725561258000},"page":"124282-124297","source":"Crossref","is-referenced-by-count":0,"title":["Automatized End Mill Wear Inspection Using a Novel Illumination Unit and Convolutional Neural Network"],"prefix":"10.1109","volume":"12","author":[{"ORCID":"https:\/\/orcid.org\/0000-0003-4065-8467","authenticated-orcid":false,"given":"M\u00fchenad","family":"Bilal","sequence":"first","affiliation":[{"name":"Application Cluster &amp;#x201C;Digital Production&amp;#x201D; Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt, Ingolstadt, Germany"}]},{"given":"Ranadheer","family":"Podishetti","sequence":"additional","affiliation":[{"name":"Application Cluster &amp;#x201C;Digital Production&amp;#x201D; Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt, Ingolstadt, Germany"}]},{"ORCID":"https:\/\/orcid.org\/0000-0003-4845-6579","authenticated-orcid":false,"given":"Leonid","family":"Koval","sequence":"additional","affiliation":[{"name":"Application Cluster &amp;#x201C;Digital Production&amp;#x201D; Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt, Ingolstadt, Germany"}]},{"given":"Mahmoud A.","family":"Gaafar","sequence":"additional","affiliation":[{"name":"Department of Physics, Faculty of Science, Menoufia University, Menoufia, Egypt"}]},{"ORCID":"https:\/\/orcid.org\/0000-0002-7388-5757","authenticated-orcid":false,"given":"Daniel","family":"Grossmann","sequence":"additional","affiliation":[{"name":"Application Cluster &amp;#x201C;Digital Production&amp;#x201D; Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt, Ingolstadt, Germany"}]},{"given":"Markus","family":"Bregulla","sequence":"additional","affiliation":[{"name":"Application Cluster &amp;#x201C;Digital Production&amp;#x201D; Progarm, AImotion Bavaria Instiutute, Technische Hochschule Ingolstadt, Ingolstadt, 
Germany"}]}],"member":"263","reference":[{"key":"ref1","doi-asserted-by":"publisher","DOI":"10.1016\/j.jmatprotec.2009.01.013"},{"key":"ref2","doi-asserted-by":"publisher","DOI":"10.1007\/s00170-014-6560-6"},{"key":"ref3","doi-asserted-by":"publisher","DOI":"10.1007\/s00170-018-1768-5"},{"key":"ref4","doi-asserted-by":"publisher","DOI":"10.1016\/j.measurement.2016.06.006"},{"key":"ref5","doi-asserted-by":"publisher","DOI":"10.3390\/ma11101977"},{"key":"ref6","doi-asserted-by":"publisher","DOI":"10.1016\/S0890-6955(01)00108-0"},{"key":"ref7","doi-asserted-by":"publisher","DOI":"10.1016\/S0043-1648(97)00137-3"},{"key":"ref8","doi-asserted-by":"publisher","DOI":"10.1016\/S0924-0136(01)00853-6"},{"key":"ref9","doi-asserted-by":"publisher","DOI":"10.1177\/1475921704047500"},{"key":"ref10","doi-asserted-by":"publisher","DOI":"10.1016\/j.jmapro.2016.03.010"},{"key":"ref11","doi-asserted-by":"publisher","DOI":"10.1016\/S0890-6955(99)00084-X"},{"key":"ref12","doi-asserted-by":"publisher","DOI":"10.1177\/0954406215616145"},{"key":"ref13","doi-asserted-by":"publisher","DOI":"10.1007\/s00170-019-04020-6"},{"key":"ref14","doi-asserted-by":"publisher","DOI":"10.1016\/j.compind.2005.05.009"},{"key":"ref15","doi-asserted-by":"publisher","DOI":"10.1016\/j.ijmachtools.2005.04.006"},{"key":"ref16","doi-asserted-by":"publisher","DOI":"10.3390\/ma14195690"},{"key":"ref17","doi-asserted-by":"publisher","DOI":"10.1109\/ACCESS.2024.3374890"},{"key":"ref18","doi-asserted-by":"publisher","DOI":"10.3390\/s19183817"},{"key":"ref19","doi-asserted-by":"publisher","DOI":"10.1016\/j.jmapro.2022.03.004"},{"key":"ref20","doi-asserted-by":"publisher","DOI":"10.1016\/j.procir.2020.01.042"},{"key":"ref21","doi-asserted-by":"publisher","DOI":"10.1016\/j.jmapro.2021.03.005"},{"key":"ref22","doi-asserted-by":"publisher","DOI":"10.1016\/j.knosys.2024.112098"},{"key":"ref23","doi-asserted-by":"publisher","DOI":"10.3390\/s21238003"},{"key":"ref24","doi-asserted-by":"publisher","DOI":"10.1016\/B978-0-32-395365-8.00018-X"},{"key":"ref25","doi-asserted-by":"publisher","DOI":"10.1016\/j.cmpbup.2022.100090"},{"key":"ref26","doi-asserted-by":"publisher","DOI":"10.1007\/978-3-319-24574-4_28"},{"issue":"4","key":"ref27","first-page":"1","article-title":"Design of lighting system in multi vision detection","volume":"24","author":"Wang","year":"2009","journal-title":"Electro-Optic Technol. Appl."},{"key":"ref28","first-page":"18","article-title":"Image capture in machine vision","volume":"1","author":"Liu","year":"2003","journal-title":"Comput. Inf. 
Technol."},{"key":"ref29","doi-asserted-by":"publisher","DOI":"10.1109\/TCPMT.2019.2895729"},{"key":"ref30","doi-asserted-by":"publisher","DOI":"10.1007\/s40684-020-00197-4"},{"key":"ref31","doi-asserted-by":"publisher","DOI":"10.1007\/s40684-021-00343-6"},{"key":"ref32","doi-asserted-by":"publisher","DOI":"10.1145\/1077534.1077563"},{"key":"ref33","doi-asserted-by":"publisher","DOI":"10.1016\/j.measurement.2020.107773"},{"key":"ref34","doi-asserted-by":"publisher","DOI":"10.1145\/360825.360839"},{"key":"ref35","doi-asserted-by":"publisher","DOI":"10.1016\/j.precisioneng.2016.01.003"},{"key":"ref36","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-015-0816-y"},{"key":"ref37","doi-asserted-by":"publisher","DOI":"10.1007\/s11263-009-0275-4"},{"key":"ref38","doi-asserted-by":"publisher","DOI":"10.1109\/access.2020.3029555"},{"key":"ref39","doi-asserted-by":"publisher","DOI":"10.1109\/TII.2024.3383547"},{"issue":"3","key":"ref40","first-page":"1234","article-title":"Design, automation and test of CNNs for rapid image recognition in optical quality control","volume":"12","author":"Weimer","year":"2016","journal-title":"IEEE Trans. Ind. Informat."},{"key":"ref41","doi-asserted-by":"publisher","DOI":"10.3390\/app8091575"},{"key":"ref42","doi-asserted-by":"publisher","DOI":"10.1109\/IJCNN.2012.6252468"}],"container-title":["IEEE Access"],"original-title":[],"link":[{"URL":"http:\/\/xplorestaging.ieee.org\/ielx8\/6287639\/10380310\/10666693.pdf?arnumber=10666693","content-type":"unspecified","content-version":"vor","intended-application":"similarity-checking"}],"deposited":{"date-parts":[[2024,9,12]],"date-time":"2024-09-12T17:59:18Z","timestamp":1726163958000},"score":1,"resource":{"primary":{"URL":"https:\/\/ieeexplore.ieee.org\/document\/10666693\/"}},"subtitle":[],"short-title":[],"issued":{"date-parts":[[2024]]},"references-count":42,"URL":"https:\/\/doi.org\/10.1109\/access.2024.3454692","relation":{},"ISSN":["2169-3536"],"issn-type":[{"type":"electronic","value":"2169-3536"}],"subject":[],"published":{"date-parts":[[2024]]}}}</enrichment>
    <enrichment key="local_crossrefDocumentType">journal-article</enrichment>
    <enrichment key="local_crossrefLicence">https://creativecommons.org/licenses/by-nc-nd/4.0/</enrichment>
    <enrichment key="local_import_origin">crossref</enrichment>
    <enrichment key="local_doiImportPopulated">PersonAuthorFirstName_1,PersonAuthorLastName_1,PersonAuthorIdentifierOrcid_1,PersonAuthorFirstName_2,PersonAuthorLastName_2,PersonAuthorFirstName_3,PersonAuthorLastName_3,PersonAuthorIdentifierOrcid_3,PersonAuthorFirstName_4,PersonAuthorLastName_4,PersonAuthorFirstName_5,PersonAuthorLastName_5,PersonAuthorIdentifierOrcid_5,PersonAuthorFirstName_6,PersonAuthorLastName_6,PublisherName,TitleMain_1,TitleParent_1,PageNumber,PageFirst,PageLast,Volume,CompletedYear,IdentifierIssn,Enrichmentlocal_crossrefLicence</enrichment>
    <enrichment key="opus.source">doi-import</enrichment>
    <enrichment key="THI_relatedIdentifier">https://doi.org/10.1109/ACCESS.2024.3454692</enrichment>
    <enrichment key="THI_openaccess">ja</enrichment>
    <enrichment key="THI_review">peer-review</enrichment>
    <enrichment key="THI_articleversion">published</enrichment>
    <licence>Creative Commons BY-NC-ND 4.0</licence>
    <author>
      <first_name>Mühenad</first_name>
      <last_name>Bilal</last_name>
    </author>
    <author>
      <first_name>Ranadheer</first_name>
      <last_name>Podishetti</last_name>
    </author>
    <author>
      <first_name>Leonid</first_name>
      <last_name>Koval</last_name>
    </author>
    <author>
      <first_name>Mahmoud A.</first_name>
      <last_name>Gaafar</last_name>
    </author>
    <author>
      <first_name>Daniel</first_name>
      <last_name>Großmann</last_name>
    </author>
    <author>
      <first_name>Markus</first_name>
      <last_name>Bregulla</last_name>
    </author>
    <collection role="open_access" number="">open_access</collection>
    <collection role="institutes" number="19310">Fakultät Wirtschaftsingenieurwesen</collection>
    <collection role="institutes" number="19379">AImotion Bavaria</collection>
    <collection role="persons" number="26836">Großmann, Daniel</collection>
    <collection role="persons" number="23276">Bregulla, Markus</collection>
    <thesisPublisher>Technische Hochschule Ingolstadt</thesisPublisher>
    <file>https://opus4.kobv.de/opus4-haw/files/5816/Automatized_End_Mill.pdf</file>
  </doc>
</export-example>
