@inproceedings{PohltHaubnerLangetal., author = {Pohlt, Clemens and Haubner, Franz and Lang, Jonas and Rochholz, Sandra and Schlegl, Thomas and Wachsmuth, Sven}, title = {Effects on User Experience During Human-Robot Collaboration in Industrial Scenarios}, series = {2018 IEEE International Conference on Systems, Man, and Cybernetics (SMC), 7-10 Oct. 2018, Miyazaki, Japan}, booktitle = {2018 IEEE International Conference on Systems, Man, and Cybernetics (SMC), 7-10 Oct. 2018, Miyazaki, Japan}, publisher = {IEEE}, doi = {10.1109/SMC.2018.00150}, pages = {837 -- 842}, abstract = {In smart manufacturing environments robots collaborate with human operators as peers. They even share the same working space and time. An intuitive interaction with different input modalities is decisive to reduce workload and training periods for collaboration. We introduce our interaction system that is able to recognize gestures, actions and objects in a typical smart working scenario. As key aspect, this article considers an empirical investigation of input modalities (touch, gesture), individual differences (performance, recognition rate, previous knowledge) and boundary conditions (level of automation) on user experience. Therefore, answers from 31 participants within two experiments are collected. We show that the arrangement of the human-robot collaboration (input modalities, boundary conditions) has a significant effect on user experience in real-world environments. This effect and the individual differences between participants can be measured utilizing recognition rates and standardized usability questionnaires.}, language = {en} } @inproceedings{PohltHellSchlegletal., author = {Pohlt, Clemens and Hell, Sebastian and Schlegl, Thomas and Wachsmuth, Sven}, title = {Impact of Spontaneous Human Inputs during Gesture based Interaction on a Real-World Manufacturing Scenario}, series = {Proceedings of the 5th International Conference on Human Agent Interaction (HAI '17), Bielefeld Germany, 17.10.2017 -20.10.2017}, booktitle = {Proceedings of the 5th International Conference on Human Agent Interaction (HAI '17), Bielefeld Germany, 17.10.2017 -20.10.2017}, editor = {Wrede, Britta and Nagai, Yukie and Komatsu, Takanori and Hanheide, Marc and Natale, Lorenzo}, publisher = {ACM}, address = {New York, NY}, isbn = {9781450351133}, doi = {10.1145/3125739.3132590}, pages = {347 -- 351}, abstract = {Seamless human-robot collaboration depends on high non-verbal behaviour recognition rates. To realize that in real-world manufacturing scenarios with an ecological valid setup, a lot of effort has to be invested. In this paper, we evaluate the impact of spontaneous inputs on the robustness of human-robot collaboration during gesture-based interaction. A high share of these spontaneous inputs lead to a reduced capability to predict behaviour and subsequently to a loss of robustness. We observe body and hand behaviour during interactive manufacturing of a collaborative task within two experiments. First, we analyse the occurrence frequency, reason and manner of human inputs in specific situations during a human-human experiment. We show the high impact of spontaneous inputs, especially in situations that differ from the typical working procedure. Second, we concentrate on implicit inputs during a real-world Wizard of Oz experiment using our human-robot working cell. 
  We show that hand positions can be used to anticipate user needs in a semi-structured environment by applying knowledge about how human behaviour is typically distributed over working space and time.},
  language = {en}
}

@inproceedings{PohltSchleglWachsmuth2020,
  author = {Pohlt, Clemens and Schlegl, Thomas and Wachsmuth, Sven},
  title = {Weakly-Supervised Learning for Multimodal Human Activity Recognition in Human-Robot Collaboration Scenarios},
  series = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS): October 25-29, 2020, Las Vegas, NV, USA (virtual)},
  booktitle = {2020 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS): October 25-29, 2020, Las Vegas, NV, USA (virtual)},
  publisher = {IEEE},
  year = {2020},
  doi = {10.1109/IROS45743.2020.9340788},
  pages = {8381 -- 8386},
  abstract = {The ability to synchronize expectations among human-robot teams and to understand discrepancies between expectations and reality is essential for human-robot collaboration scenarios. To ensure this, human activities and intentions must be interpreted quickly and reliably by the robot using various modalities. In this paper we propose a multimodal recognition system designed to detect physical interactions as well as nonverbal gestures. Existing approaches achieve high post-transfer recognition rates, but only when large, well-prepared datasets are available. Unfortunately, the acquisition and preparation of domain-specific samples, especially in industrial contexts, is time-consuming and expensive. To reduce this effort, we introduce a weakly-supervised classification approach: we learn a latent representation of the human activities with a variational autoencoder network, and incorporate additional modalities and unlabeled samples through a scalable product-of-experts sampling approach. The applicability in industrial contexts is evaluated on two domain-specific collaborative robot datasets. Our results demonstrate that we can keep the number of labeled samples constant while increasing the network performance by providing additional unprocessed information.},
  language = {en}
}

@inproceedings{PohltSchleglWachsmuth2019,
  author = {Pohlt, Clemens and Schlegl, Thomas and Wachsmuth, Sven},
  title = {Human Work Activity Recognition for Working Cells in Industrial Production Contexts},
  series = {2019 IEEE International Conference on Systems, Man and Cybernetics (SMC), 6-9 Oct. 2019, Bari, Italy},
  booktitle = {2019 IEEE International Conference on Systems, Man and Cybernetics (SMC), 6-9 Oct. 2019, Bari, Italy},
  year = {2019},
  doi = {10.1109/SMC.2019.8913873},
  pages = {4225 -- 4230},
  abstract = {Collaboration between robots and humans requires communicative skills on both sides. The robot has to understand the conscious and unconscious activities of human workers. Many state-of-the-art activity recognition algorithms with high performance rates on existing benchmark datasets are available for this task. This paper re-evaluates appropriate architectures in light of human work activity recognition for working cells in industrial production contexts. The specific constraints of such a domain are elaborated and used as prior knowledge. We utilize state-of-the-art algorithms as spatiotemporal feature encoders and search for appropriate classification and fusion strategies. Furthermore, we combine keypoint-based and appearance-based approaches into a multi-stream recognition system.
  Due to data protection rules and the high effort of data annotation within industrial domains, only small datasets that reflect production aspects are available. Therefore, we use transfer learning approaches to reduce the dependency on data volume and variance in the target domain. The resulting recognition system achieves high performance for both single-person actions and human-object interactions.},
  language = {en}
}