@inproceedings{SchiemannHodappBerger,
  author = {Schiemann, Markus and Hodapp, Jan and Berger, Ulrich},
  title = {Collaboration-Gap: A bus-modular architecture for human-robot-collaboration systems in production environments},
  booktitle = {International Symposium on Robotics, ISR 2018, June 20-21, 2018, Messe M{\"u}nchen, Munich, Germany},
  publisher = {VDE Verlag},
  address = {Berlin},
  isbn = {978-3-8007-4699-6},
  pages = {450--454},
  abstract = {Due to the extensive changes accompanying Industry 4.0, many established barriers are falling. One of these barriers, which is increasingly disappearing, separates the workspaces of human beings and robots and thus prevents a versatile collaborative environment. This change has led to a wide field of research focused on preventing human-robot impacts and minimizing the related risks. This paper focuses on the safety regulations introduced as a consequence of those developments over the last ten years, and in particular on an approach for demonstrating the unintended gap that arises between human and robot as a result of those regulations. Modularity and integrability, two of the key characteristics of the Reconfigurable Manufacturing Systems (RMS) concept, serve as the comparative parameters.},
  language = {en}
}

@inproceedings{SchiemannBergerZuernetal.,
  author = {Schiemann, Markus and Berger, Ulrich and Z{\"u}rn, Michael and Reichenbach, Matthias},
  title = {Mensch-Roboter-Kollaboration im Spannungsfeld von Rekonfigurierbarkeit, Sicherheit und Effizienz},
  booktitle = {20. Leitkongress der Mess- und Automatisierungstechnik, AUTOMATION 2019},
  publisher = {VDI},
  address = {Baden-Baden},
  isbn = {978-3-18-092351-2},
  pages = {5--14},
  language = {de}
}

@article{SchiemannBerger,
  author = {Schiemann, Markus and Berger, Ulrich},
  title = {Mensch-Roboter-Kollaboration in einem vernetzten Produktionsumfeld, SmartSafety: Ein beschleunigter Inbetriebnahmeprozess},
  journal = {Zeitschrift f{\"u}r wirtschaftlichen Fabrikbetrieb (ZWF)},
  volume = {114},
  number = {6},
  issn = {2511-0896},
  doi = {10.3139/104.112108},
  pages = {364--366},
  language = {de}
}

@inproceedings{HodappSchiemannBilousetal.,
  author = {Hodapp, Jan and Schiemann, Markus and Bilous, Vadym and Arcidiacono, Claudio Salvatore and Reichenbach, Matthias},
  title = {Advances in Automated Generation of Convolutional Neural Networks from Synthetic Data in Industrial Environment},
  booktitle = {53rd Hawaii International Conference on System Sciences (HICSS), Maui, Hawaii, USA, January 7-10, 2020},
  isbn = {978-0-9981331-3-3},
  doi = {10.24251/HICSS.2020.565},
  pages = {7},
  abstract = {The use of convolutional neural networks has revolutionized data processing and its industrial applications over the last few years. In particular, object detection in images, a task that was historically hard to automate, is now available on every smartphone. Nonetheless, this technology has not yet spread to car production, where many visual tests and quality checks are still performed manually.
Even though the vision capabilities that convolutional neural networks can give machines are already respectable, they still need well-prepared training data, which is costly and time-consuming to produce. This paper describes our effort to test and improve a system that automatically synthesizes training images. The existing system renders computer-aided design models into scenes and from these produces realistic images and corresponding labels. Two new models, Single Shot Detector and RetinaNet, are retrained using distractors and then tested against each other. The better-performing RetinaNet is then trained with a variety of datasets from different domains in order to observe the model's strengths and weaknesses under domain shift. These domains are real photographs, rendered models, and images of objects cut and pasted onto different backgrounds. The results show that the model trained with a mixture of all domains performs best.},
  language = {en}
}

@inproceedings{SchiemannBergerHodappetal.,
  author = {Schiemann, Markus and Berger, Ulrich and Hodapp, Jan and Z{\"u}rn, Michael},
  title = {Roboskin: Increased Robot Working Speed Within Human-Robot-Collaboration Safety Regulations},
  booktitle = {Conference Proceedings (IEEE Xplore)},
  isbn = {978-1-7281-3325-6},
  issn = {2251-2446},
  doi = {10.1109/ICCAR.2019.8813448},
  pages = {7},
  language = {en}
}