2022
Baumeister, Annalies; Pascher, Max; Shivashankar, Yashaswini; Goldau, Felix Ferdinand; Frese, Udo; Gerken, Jens; Gardo, Elizaveta; Klein, Barbara; Tolle, Patrizia AI for Simplifying the Use of an Assistive Robotic Arm for People with severe Body Impairments Presentation (DoF), Daegu, Korea, 24.10.2022. @misc{Baumeister2022,
title = {AI for Simplifying the Use of an Assistive Robotic Arm for People with severe Body Impairments},
author = {Annalies Baumeister and Max Pascher and Yashaswini Shivashankar and Felix Ferdinand Goldau and Udo Frese and Jens Gerken and Elizaveta Gardo and Barbara Klein and Patrizia Tolle},
editor = {Proceedings of the 13th World Conference of Gerontechnology (ISG 2022)},
url = {https://hci.w-hs.de/pub_isg2022_dof-adaptiv/, PDF Download},
doi = {10.13140/RG.2.2.13860.35208/3},
year = {2022},
date = {2022-10-24},
urldate = {2022-10-24},
address = {Daegu, Korea},
abstract = {Assistive robotic arms, e.g., the Kinova JACO, aim to assist people with upper-body disabilities in everyday tasks and thus increase their autonomy (Brose et al. 2010; Beaudoin et al. 2019). A long-term survey with seven JACO users showed that they were satisfied with the technology and that JACO had a positive psychosocial impact. Still, the users had some difficulties performing daily activities with the arm, e.g., it took them some time to finish a task (Beaudoin et al. 2019). Herlant et al. claim that the main problem for a user is that mode switching is time-consuming and tiring (Herlant et al. 2017). To tackle this issue, deep neural networks will be developed to facilitate the use of the robotic arm. A sensor-based situation recognition will be combined with an algorithm-based control to form an adaptive AI-based control system. The project focuses on three main aspects: 1) A neural network providing suggestions for movement options based on training data generated in virtual reality. 2) Exploring data glasses as a possibility for displaying feedback in a user-centered design process. 3) Elicitation of requirements, risks and ethical system evaluation using a participatory approach.},
keywords = {adaptive AI-based control, assistive robotic arm, degrees of freedom, human-robot collaboration, quality of life},
pubstate = {published},
tppubtype = {presentation}
}
Arévalo-Arboleda, Stephanie Towards a Human-Robot Interaction Design for People with Motor Disabilities by Enhancing the Visual Space PhD thesis (MIA), Universität Duisburg-Essen, 2022. @phdthesis{Arévalo-Arboleda2022b,
title = {Towards a Human-Robot Interaction Design for People with Motor Disabilities by Enhancing the Visual Space},
author = {Stephanie Arévalo-Arboleda},
school = {Universität Duisburg-Essen},
doi = {10.17185/duepublico/76485},
year = {2022},
date = {2022-09-08},
abstract = {People with motor disabilities experience several physical limitations that affect not only their activities of daily living but their integration into the labor market. Human-Robot Collaboration presents opportunities to enhance human capabilities and counters physical limitations through different interaction paradigms and technological devices. However, little is known about the needs, expectations, and perspectives of people with motor disabilities within a human-robot collaborative work environment.
In this thesis, we aim to shed light on the perspectives of people with motor disabilities when designing a teleoperation concept that could enable them to perform manipulation tasks in a manufacturing environment. First, we provide the concerns of different people with motor disabilities, social workers, and caregivers about including a collaborative robotic arm in assembly lines. Second, we identify specific opportunities and potential challenges in hands-free interaction design for robot control. Third, we present a multimodal hands-free interaction for robot control that uses augmented reality to display the user interface. On top of that, we propose a feedback concept that provides augmented visual cues to aid robot operators in gaining a better perception of the location of the objects in the workspace and improve performance in pick-and-place tasks.
We present our contributions through six studies with people with and without disabilities, and the empirical findings are reported in eight publications. Publications I, II, and IV aim to extend the research efforts of designing human-robot collaborative spaces for people with motor disabilities. Publication III sheds light on the reasoning for hands-free modality choices, and Publication VIII evaluates a hands-free teleoperation concept with an individual with motor disabilities. Publications V–VIII explore augmented reality to present a user interface that facilitates hands-free robot control and uses augmented visual cues to address depth perception issues, thus improving performance in pick-and-place tasks.
Our findings can be summarized as follows. We point out concerns grouped into three themes: the robot fitting in the social and organizational structure, human-robot synergy, and human-robot problem management. Additionally, we provide five lessons learned derived from the pragmatic use of participatory design for people with motor disabilities, (1) approach participants through different channels and allow for multidisciplinarity in the research team, (2) consider the relationship between social dependencies in the selection of a participatory design technique, (3) plan for early exposure to robots and other technology, (4) take into account all opinions in design sessions, and (5) acknowledge that ethical implications go beyond consent. Also, we introduce findings about the nature of modality choices in hands-free interaction, which point to the user’s own abilities and individual experiences as determining factors in interaction evaluation. Finally, we present and evaluate a possible hands-free multimodal interaction design for robot control using augmented reality and augmented visual cues. We propose that augmented visual cues can improve depth perception and performance in pick-and-place tasks. Thus, we evaluated our designs of visual cues by taking into account depth-related variables (target’s distance and pose) and subjective certainty. Our results highlight that shorter distances and a clear pose lead to higher success, faster grasping time, and higher certainty. In addition, we re-designed our augmented visual cues considering visualization techniques and monocular cues that could be used to enhance the visual space for robot teleoperation. Our results demonstrate that our augmented visual cues can assist robot control and increase accuracy in pick-and-place tasks.
In conclusion, our findings on people with motor disabilities in a human-robot collaborative workplace, a hands-free multimodal interaction design, and augmented visual cues can extend the knowledge about using mixed reality in human-robot interaction. Further, these contributions have the potential to promote future research to design inclusive environments for people with disabilities.},
keywords = {assistive robotics, augmented reality},
pubstate = {published},
tppubtype = {phdthesis}
}
Pascher, Max; Kronhardt, Kirill; Franzen, Til; Gerken, Jens Adaptive DoF: Concepts to Visualize AI-generated Movements in Human-Robot Collaboration Conference paper (DoF) In: Proceedings of the 2022 International Conference on Advanced Visual Interfaces (AVI 2022), ACM, New York, NY, USA, 2022, ISBN: 978-1-4503-9719-3/22/06. @inproceedings{Pascher2022b,
title = {Adaptive DoF: Concepts to Visualize AI-generated Movements in Human-Robot Collaboration},
author = {Max Pascher and Kirill Kronhardt and Til Franzen and Jens Gerken},
url = {https://hci.w-hs.de/pub_adaptive-dof-concepts-to-visualize-ai-generated-movements-in-human-robot-collaboration/, PDF Download},
doi = {10.1145/3531073.3534479},
isbn = {978-1-4503-9719-3/22/06},
year = {2022},
date = {2022-06-06},
urldate = {2022-06-06},
booktitle = {Proceedings of the 2022 International Conference on Advanced Visual Interfaces (AVI 2022)},
publisher = {ACM},
address = {New York, NY, USA},
abstract = {Nowadays, robots collaborate closely with humans in a growing number of areas. Enabled by lightweight materials and safety sensors, these cobots are gaining increasing popularity in domestic care, supporting people with physical impairments in their everyday lives. However, when cobots perform actions autonomously, it remains challenging for human collaborators to understand and predict their behavior. This, however, is crucial for achieving trust and user acceptance. One significant aspect of predicting cobot behavior is understanding their motion intent and comprehending how they "think" about their actions. We work on solutions that communicate the cobot's AI-generated motion intent to a human collaborator. Effective communication enables users to proceed with the most suitable option. We present a design exploration with different visualization techniques to optimize this user understanding, ideally resulting in increased safety and end-user acceptance.},
keywords = {cobot, human-robot collaboration, intention feedback, neural network, visualization techniques},
pubstate = {published},
tppubtype = {inproceedings}
}
Butz, Benjamin; Jussen, Alexander; Rafi, Asma; Lux, Gregor; Gerken, Jens A Taxonomy for Augmented and Mixed Reality Applications to Support Physical Exercises in Medical Rehabilitation—A Literature Review Article (Next Level Sports) In: Healthcare, 10 (4), pp. 646, 2022, ISSN: 2227-9032. @article{Butz2022,
title = {A Taxonomy for Augmented and Mixed Reality Applications to Support Physical Exercises in Medical Rehabilitation—A Literature Review},
author = {Benjamin Butz and Alexander Jussen and Asma Rafi and Gregor Lux and Jens Gerken},
url = {https://hci.w-hs.de/pub_healthcare-10-00646-v2/, PDF Download},
doi = {10.3390/healthcare10040646},
issn = {2227-9032},
year = {2022},
date = {2022-03-30},
journal = {Healthcare},
volume = {10},
number = {4},
pages = {646},
abstract = {In the past 20 years, a vast amount of research has shown that Augmented and Mixed Reality applications can support physical exercises in medical rehabilitation. In this paper, we contribute a taxonomy, providing an overview of the current state of research in this area. It is based on a comprehensive literature review conducted on the five databases Web of Science, ScienceDirect, PubMed, IEEE Xplore, and ACM up to July 2021. Out of 776 identified references, a final selection was made of 91 papers discussing the usage of visual stimuli delivered by AR/MR or similar technology to enhance the performance of physical exercises in medical rehabilitation. The taxonomy bridges the gap between a medical perspective (Patient Type, Medical Purpose) and the Interaction Design, focusing on Output Technologies and Visual Guidance. Most approaches aim to improve autonomy in the absence of a therapist and increase motivation to improve adherence. Technology is still focused on screen-based approaches, while the deeper analysis of Visual Guidance revealed 13 distinct, recurring abstract types of elements. Based on the analysis, implications and research opportunities are presented to guide future work.},
keywords = {augmented reality, exercise, extended reality, medical, mixed reality, rehabilitation, therapy, virtual reality, visual cues, visualization},
pubstate = {published},
tppubtype = {article}
}
Kronhardt, Kirill; Rübner, Stephan; Pascher, Max; Goldau, Felix; Frese, Udo; Gerken, Jens Adapt or Perish? Exploring the Effectiveness of Adaptive DoF Control Interaction Methods for Assistive Robot Arms Article (DoF) In: Technologies, 10 (1), 2022, ISSN: 2227-7080. @article{Kronhardt2022,
title = {Adapt or Perish? Exploring the Effectiveness of Adaptive DoF Control Interaction Methods for Assistive Robot Arms},
author = {Kirill Kronhardt and Stephan Rübner and Max Pascher and Felix Goldau and Udo Frese and Jens Gerken},
url = {https://hci.w-hs.de/pub_technologies-10-00030/, PDF Download
https://youtu.be/AEK4AOQKz1k},
doi = {10.3390/technologies10010030},
issn = {2227-7080},
year = {2022},
date = {2022-02-14},
urldate = {2022-02-14},
journal = {Technologies},
volume = {10},
number = {1},
abstract = {Robot arms are one of many assistive technologies used by people with motor impairments. Assistive robot arms can allow people to perform activities of daily living (ADL) involving grasping and manipulating objects in their environment without the assistance of caregivers. Suitable input devices (e.g., joysticks) mostly have two Degrees of Freedom (DoF), while most assistive robot arms have six or more. This results in time-consuming and cognitively demanding mode switches to change the mapping of DoFs to control the robot. One option to decrease the difficulty of controlling a high-DoF assistive robot arm using a low-DoF input device is to assign different combinations of movement-DoFs to the device's input DoFs depending on the current situation (adaptive control). To explore this method of control, we designed two adaptive control methods for a realistic virtual 3D environment. We evaluated our methods against a commonly used non-adaptive control method that requires the user to switch controls manually. This was conducted in a simulated remote study that used Virtual Reality and involved 39 non-disabled participants. Our results show that the number of mode switches necessary to complete a simple pick-and-place task decreases significantly when using an adaptive control type. In contrast, the task completion time and workload stay the same. A thematic analysis of qualitative feedback of our participants suggests that a longer period of training could further improve the performance of adaptive control methods.},
keywords = {assistive robotics, augmented reality, human-robot interaction, shared user control, virtual reality, visual cues},
pubstate = {published},
tppubtype = {article}
}
Pascher, Max; Kronhardt, Kirill; Franzen, Til; Gruenefeld, Uwe; Schneegass, Stefan; Gerken, Jens My Caregiver the Cobot: Comparing Visualization Techniques to Effectively Communicate Cobot Perception to People with Physical Impairments Article (MobILe) In: Sensors, 22 (3), 2022, ISSN: 1424-8220. @article{Pascher2022,
title = {My Caregiver the Cobot: Comparing Visualization Techniques to Effectively Communicate Cobot Perception to People with Physical Impairments},
author = {Max Pascher and Kirill Kronhardt and Til Franzen and Uwe Gruenefeld and Stefan Schneegass and Jens Gerken},
url = {https://hci.w-hs.de/pub_sensors-22-00755-v2/, PDF Download},
doi = {10.3390/s22030755},
issn = {1424-8220},
year = {2022},
date = {2022-01-19},
journal = {Sensors},
volume = {22},
number = {3},
abstract = {Nowadays, robots are found in a growing number of areas where they collaborate closely with humans. Enabled by lightweight materials and safety sensors, these cobots increasingly gain popularity in domestic care, where they support people with physical impairments in their everyday lives. However, when cobots perform actions autonomously, it remains challenging for human collaborators to understand and predict their behavior, which is crucial for achieving trust and user acceptance. One significant aspect of predicting cobot behavior is understanding their perception and comprehending how they "see" the world. To tackle this challenge, we compared three different visualization techniques for Spatial Augmented Reality. All of these communicate cobot perception by visually indicating which objects in the cobot's surroundings have been identified by their sensors. We compared the well-established visualizations Wedge and Halo against our proposed visualization Line in a remote user study with participants suffering from physical impairments. In a second remote study, we validated these findings with a broader non-specific user base. Our findings show that Line, a lower complexity visualization, results in significantly faster reaction times compared to Halo, and lower task load compared to both Wedge and Halo. Overall, users prefer Line as a more straightforward visualization. In Spatial Augmented Reality, with its known disadvantage of limited projection area size, established off-screen visualizations are not effective in communicating cobot perception and Line presents an easy-to-understand alternative.},
keywords = {cobot, human-robot collaboration, projection, virtual reality, visualization techniques},
pubstate = {published},
tppubtype = {article}
}
Arévalo-Arboleda, Stephanie; Becker, Marvin; Gerken, Jens Does One Size Fit All? A Case Study to Discuss Findings of an Augmented Hands-Free Robot Teleoperation Concept for People with and without Motor Disabilities Article (MIA) In: Technologies, 10 (1), 2022, ISSN: 2227-7080. @article{Arévalo-Arboleda2022,
title = {Does One Size Fit All? A Case Study to Discuss Findings of an Augmented Hands-Free Robot Teleoperation Concept for People with and without Motor Disabilities},
author = {Stephanie Arévalo-Arboleda and Marvin Becker and Jens Gerken},
editor = {Fillia Makedon},
url = {https://hci.w-hs.de/pub_technologies-10-00004-v2/, PDF Download},
doi = {10.3390/technologies10010004},
issn = {2227-7080},
year = {2022},
date = {2022-01-06},
issuetitle = {Selected Papers from the PETRA Conference Series},
journal = {Technologies},
volume = {10},
number = {1},
publisher = {MDPI},
abstract = {Hands-free robot teleoperation and augmented reality have the potential to create an inclusive environment for people with motor disabilities. It may allow them to teleoperate robotic arms to manipulate objects. However, the experiences evoked by the same teleoperation concept and augmented reality can vary significantly for people with motor disabilities compared to those without disabilities. In this paper, we report the experiences of Miss L., a person with multiple sclerosis, when teleoperating a robotic arm in a hands-free multimodal manner using a virtual menu and visual hints presented through the Microsoft HoloLens 2. We discuss our findings and compare her experiences to those of people without disabilities using the same teleoperation concept. Additionally, we present three learning points from comparing these experiences: a re-evaluation of the metrics used to measure performance, being aware of the bias, and considering variability in abilities, which evokes different experiences. We consider that these learning points can be extrapolated to carrying out human-robot interaction evaluations with mixed groups of participants with and without disabilities.},
keywords = {augmented reality, case study, hands-free interaction, learning points, people with motor disabilities, robot teleoperation},
pubstate = {published},
tppubtype = {article}
}
2021
Baumeister, Annalies; Gardo, Elizaveta; Tolle, Patrizia; Klein, Barbara; Pascher, Max; Gerken, Jens; Goldau, Felix; Shivashankar, Yashaswini; Frese, Udo The Importance of Participatory Design for the Development of Assistive Robotic Arms. Initial Approaches and Experiences in the Research Projects MobILe and DoF-Adaptiv Conference paper (MobILe, DoF) In: Proceedings of Connected Living: international and interdisciplinary conference (2021), Frankfurt am Main, 2021. @inproceedings{Baumeister2021,
title = {The Importance of Participatory Design for the Development of Assistive Robotic Arms. Initial Approaches and Experiences in the Research Projects MobILe and DoF-Adaptiv},
author = {Annalies Baumeister and Elizaveta Gardo and Patrizia Tolle and Barbara Klein and Max Pascher and Jens Gerken and Felix Goldau and Yashaswini Shivashankar and Udo Frese},
url = {https://hci.w-hs.de/pub_the_importance_of_participatory_design_for_the_development_of_assistive_robotic_arms_initial_approaches_and_experiences_in_the_research_projects_mobile_and_dof-adaptiv/, PDF Download},
doi = {10.48718/8p7x-cw14},
year = {2021},
date = {2021-10-08},
booktitle = {Proceedings of Connected Living: international and interdisciplinary conference (2021)},
address = {Frankfurt am Main},
abstract = {This article introduces two research projects towards assistive robotic arms for people with severe body impairments. Both projects aim to develop new control and interaction designs to promote accessibility and a better performance for people with functional losses in all four extremities, e.g., due to quadriplegia or multiple sclerosis. The project MobILe concentrates on using a robotic arm as a drinking aid and controlling it with smart glasses, eye-tracking and augmented reality. A user-oriented development process with participatory methods was pursued, which brought new knowledge about the life and care situation of the future target group and the requirements a robotic drinking aid needs to meet. As a consequence, the new project DoF-Adaptiv follows an even more participatory approach, including the future target group, their family and professional caregivers from the beginning in decision making and development processes within the project. DoF-Adaptiv aims to simplify the control modalities of assistive robotic arms to enhance the usability of the robotic arm for activities of daily living. To decide on exemplary activities, like eating or opening a door, the future target group, their family and professional caregivers are included in the decision making process. Furthermore, all relevant stakeholders will be included in the investigation of ethical, legal and social implications as well as the identification of potential risks. This article will show the importance of participatory design for the development and research process in MobILe and DoF-Adaptiv.},
keywords = {assistive robotics, human-centered design, participatory design, risk management, user acceptance},
pubstate = {published},
tppubtype = {inproceedings}
}
Borsum, Florian; Pascher, Max; Auda, Jonas; Schneegass, Stefan; Lux, Gregor; Gerken, Jens Stay on Course in VR: Comparing the Precision of Movement between Gamepad, Armswinger, and Treadmill Conference paper In: Mensch und Computer 2021 - Tagungsband, Gesellschaft für Informatik e.V., Bonn, 2021, ISBN: 978-1-4503-8645-6/21/09. @inproceedings{Borsum2021,
title = {Stay on Course in VR: Comparing the Precision of Movement between Gamepad, Armswinger, and Treadmill},
author = {Florian Borsum and Max Pascher and Jonas Auda and Stefan Schneegass and Gregor Lux and Jens Gerken},
url = {https://hci.w-hs.de/pub_kurs_halten_in_vr__vergleich_der_bewegungspr_zision_von_gamepad__armswinger_und_laufstall/, PDF Download},
doi = {10.1145/3473856.3473880},
isbn = {978-1-4503-8645-6/21/09},
year = {2021},
date = {2021-09-05},
booktitle = {Mensch und Computer 2021 - Tagungsband},
publisher = {Gesellschaft für Informatik e.V.},
address = {Bonn},
abstract = {This paper investigates the extent to which different locomotion techniques in Virtual Reality environments influence the precision of interaction. A total of three techniques were examined: two of them incorporate physical activity to create a high degree of realism in the movement (Armswinger, treadmill), while a gamepad served as the third technique and as a baseline. In a study with 18 participants, the precision of these three locomotion techniques was examined across six different obstacles in a VR course. The results show that for individual obstacles that require a combination of forward and sideways movement (slalom, cliff) or that focus on speed (rail), the treadmill enables significantly more precise control than the Armswinger. Across the entire course, however, no input device is significantly more precise than any other. Using the treadmill also takes significantly more time than the gamepad and the Armswinger. Likewise, it became apparent that the goal of reproducing a real walking motion 1:1 is still not achieved even with a treadmill, yet the movement is nevertheless perceived as intuitive and immersive.},
keywords = {armswinger, gamepad, locomotion, precision, treadmill, virtual reality},
pubstate = {published},
tppubtype = {inproceedings}
}
Pascher, Max; Baumeister, Annalies; Schneegass, Stefan; Klein, Barbara; Gerken, Jens Recommendations for the Development of a Robotic Drinking and Eating Aid - An Ethnographic Study Conference paper (MobILe) In: Carmelo Ardito, Rosa Lanzilotti, Alessio Malizia (Eds.): Human-Computer Interaction – INTERACT 2021, Springer, Cham, 2021, ISBN: 978-3-030-85623-6. @inproceedings{Pascher2021,
title = {Recommendations for the Development of a Robotic Drinking and Eating Aid - An Ethnographic Study },
author = {Max Pascher and Annalies Baumeister and Stefan Schneegass and Barbara Klein and Jens Gerken},
editor = {Carmelo Ardito, Rosa Lanzilotti, Alessio Malizia, Helen Petrie, Antonio Piccinno, Giuseppe Desolda, Kori Inkpen},
url = {https://hci.w-hs.de/pub_recommendations_for_the_development_of_a_robotic_drinking_and_eating_aid___an_ethnographic_study/, PDF Download},
doi = {10.1007/978-3-030-85623-6_21},
isbn = {978-3-030-85623-6},
year = {2021},
date = {2021-09-01},
urldate = {2021-09-01},
booktitle = {Human-Computer Interaction – INTERACT 2021},
publisher = {Springer, Cham},
abstract = {Being able to live independently and self-determined in one's own home is a crucial factor for human dignity and preservation of self-worth. For people with severe physical impairments who cannot use their limbs for everyday tasks, living in their own home is only possible with assistance from others. The inability to move arms and hands makes it hard to take care of oneself, e.g., drinking and eating independently. In this paper, we investigate how 15 participants with disabilities consume food and drinks. We report on interviews and participatory observations, and analyze the aids they currently use. Based on our findings, we derive a set of recommendations that supports researchers and practitioners in designing future robotic drinking and eating aids for people with disabilities.},
keywords = {assisted living technologies, human-centered computing, meal assistance, participation design, people with disabilities, robot assistive drinking, robot assistive feeding, user acceptance, user participation, user-centered design},
pubstate = {published},
tppubtype = {inproceedings}
}
Olaya-Figueroa, Juan F.; Lakhnati, Younes; Gerken, Jens Facilitating Mind-Wandering Through Video Games Conference paper (futureWork) In: Carmelo Ardito, Rosa Lanzilotti, Alessio Malizia (Eds.): Human-Computer Interaction – INTERACT 2021, Springer, Cham, 2021. @inproceedings{Olaya-Figueroa2021,
title = {Facilitating Mind-Wandering Through Video Games},
author = {Juan F. Olaya-Figueroa and Younes Lakhnati and Jens Gerken},
editor = {Carmelo Ardito, Rosa Lanzilotti, Alessio Malizia, Helen Petrie, Antonio Piccinno, Giuseppe Desolda, Kori Inkpen},
url = {https://hci.w-hs.de/pub_museflow_interact2021/, PDF Download},
doi = {10.1007/978-3-030-85613-7_9},
year = {2021},
date = {2021-08-30},
booktitle = {Human-Computer Interaction – INTERACT 2021},
publisher = {Springer, Cham},
abstract = {Mind-wandering, i.e., letting the mind drift away from the task at hand, is mostly seen as a state of mind to avoid, as it may negatively impact the current task. However, evidence in cognitive science shows that mind-wandering can also positively affect creativity and problem-solving. Still, there is a lack of technological solutions to facilitate and utilize mind-wandering in such a specific way. In this short paper, we present MuseFlow, a video game designed to facilitate mind-wandering deliberately. Our study shows that MuseFlow induces mind-wandering significantly more often compared to a demanding game condition while maintaining the players' motivation to play and succeed in the game.},
keywords = {creativity, creativity support tools, game mechanics, mind-wandering, problem-solving, serious games, video game},
pubstate = {published},
tppubtype = {inproceedings}
}
Arévalo-Arboleda, Stephanie; Dierks, Tim; Ruecker, Franziska; Gerken, Jens Exploring the Visual Space to Improve Depth Perception in Robot Teleoperation using Augmented Reality: The Role of Distance and Target’s Pose in Time, Success, and Certainty Conference paper (MIA) In: Carmelo Ardito, Rosa Lanzilotti, Alessio Malizia (Eds.): Human-Computer Interaction – INTERACT 2021, Springer, Cham, 2021. @inproceedings{Arévalo-Arboleda2021c,
title = {Exploring the Visual Space to Improve Depth Perception in Robot Teleoperation using Augmented Reality: The Role of Distance and Target’s Pose in Time, Success, and Certainty},
author = {Stephanie Arévalo-Arboleda and Tim Dierks and Franziska Ruecker and Jens Gerken},
editor = {Carmelo Ardito, Rosa Lanzilotti, Alessio Malizia, Helen Petrie, Antonio Piccinno, Giuseppe Desolda, Kori Inkpen},
url = {https://hci.w-hs.de/pub_interact21_mia_compressed-2/, PDF Download},
doi = {10.1007/978-3-030-85623-6_31},
year = {2021},
date = {2021-08-30},
booktitle = {Human-Computer Interaction – INTERACT 2021},
publisher = {Springer, Cham},
abstract = {Accurate depth perception in co-located teleoperation has the potential to improve task performance in manipulation and grasping tasks. We thus explore the operator's visual space and design visual cues using augmented reality that aim to facilitate the positioning of the gripper above a target object before attempting to grasp it. The designs we propose include a virtual circle (Circle), virtual extensions (Extensions) from the gripper's fingers, and a color matching design using a real colormap with matching colored virtual circles (Colors). We conducted an experiment to evaluate these designs and the influence of distance from the operator to the workspace and the target object's pose. Here, we report on time, success, and perceived certainty in a grasping task. Our results show that a shorter distance leads to higher success, faster grasping time, and higher certainty. Concerning the target object's pose, a clear pose leads to higher success and certainty but interestingly slower task times. Regarding the design of cues, our results reveal that the simplicity of the Circle cue leads to the highest success and outperforms the most complex cue Colors also for task time, while the level of certainty seems to depend more on the distance than the type of cue. We consider that our results can serve as an initial analysis to further explore these factors both when designing to improve depth perception and within the context of co-located teleoperation.},
keywords = {augmented reality, depth perception, human-robot interaction, user study},
pubstate = {published},
tppubtype = {inproceedings}
}
Karapanos, Evangelos; Gerken, Jens; Kjeldskov, Jesper; Skov, Mikael B. Advances in Longitudinal HCI Research Book Springer, Cham, 2021, ISBN: 978-3-030-67322-2. @book{Karapanos2021,
title = {Advances in Longitudinal HCI Research},
author = {Evangelos Karapanos and Jens Gerken and Jesper Kjeldskov and Mikael B. Skov},
url = {https://hci.w-hs.de/pub_advances_in_longitudinal_hci_research_leseprobe_01/, PDF Download},
doi = {10.1007/978-3-030-67322-2},
isbn = {978-3-030-67322-2},
year = {2021},
date = {2021-08-12},
publisher = {Springer},
address = {Cham},
abstract = {Longitudinal studies have traditionally been seen as too cumbersome and labor-intensive to be of much use in research on Human-Computer Interaction (HCI). However, recent trends in market, legislation, and the research questions we address, have highlighted the importance of studying prolonged use, while technology itself has made longitudinal research more accessible to researchers across different application domains. Aimed as an educational resource for graduate students and researchers in HCI, this book brings together a collection of chapters, addressing theoretical and methodological considerations, and presenting case studies of longitudinal HCI research. Among others, the authors: discuss the theoretical underpinnings of longitudinal HCI research, such as when a longitudinal study is appropriate, what research questions can be addressed and what challenges are entailed in different longitudinal research designs; reflect on methodological challenges in longitudinal data collection and analysis, such as how to maintain participant adherence and data reliability when employing the Experience Sampling Method in longitudinal settings, or how to cope with data collection fatigue and data safety in applications of autoethnography and autobiographical design, which may span from months to several years; and present a number of case studies covering different topics of longitudinal HCI research, from “slow technology”, to self-tracking, to mid-air haptic feedback, and crowdsourcing.},
keywords = {Field studies, longitudinal hci research, longitudinal studies in information systems, research designs, user study},
pubstate = {published},
tppubtype = {book}
}
Arévalo-Arboleda, Stephanie; Pascher, Max; Baumeister, Annalies; Klein, Barbara; Gerken, Jens Reflecting upon Participatory Design in Human-Robot Collaboration for People with Motor Disabilities: Challenges and Lessons Learned from Three Multiyear Projects Conference paper (MIA, MobILe) In: The 14th PErvasive Technologies Related to Assistive Environments Conference - PETRA 2021, ACM, 2021, ISBN: 978-1-4503-8792-7/21/06. @inproceedings{Arévalo-Arboleda2021b,
title = {Reflecting upon Participatory Design in Human-Robot Collaboration for People with Motor Disabilities: Challenges and Lessons Learned from Three Multiyear Projects},
author = {Stephanie Arévalo-Arboleda and Max Pascher and Annalies Baumeister and Barbara Klein and Jens Gerken},
url = {https://hci.w-hs.de/pub_petra_2021_cameraready/, PDF Download},
doi = {10.1145/3453892.3458044},
isbn = {978-1-4503-8792-7/21/06},
year = {2021},
date = {2021-06-29},
booktitle = {The 14th PErvasive Technologies Related to Assistive Environments Conference - PETRA 2021},
organization = {ACM},
abstract = {Human-robot technology has the potential to positively impact the lives of people with motor disabilities. However, current efforts have mostly been oriented towards technology (sensors, devices, modalities, interaction techniques), thus relegating the user and their valuable input to the wayside. In this paper, we aim to present a holistic perspective of the role of participatory design in Human-Robot Collaboration (HRC) for People with Motor Disabilities (PWMD). We have been involved in several multiyear projects related to HRC for PWMD, where we encountered different challenges related to planning and participation, preferences of stakeholders, using certain participatory design techniques, technology exposure, as well as ethical, legal, and social implications. These challenges helped us provide five lessons learned that could serve as a guideline to researchers when using participatory design with vulnerable groups, in particular young researchers who are starting to explore HRC research for people with disabilities.},
keywords = {accessibility design, human-robot collaboration, lessons learned, participatory design},
pubstate = {published},
tppubtype = {inproceedings}
}
Arévalo-Arboleda, Stephanie; Ruecker, Franziska; Dierks, Tim; Gerken, Jens Assisting Manipulation and Grasping in Robot Teleoperation with Augmented Reality Visual Cues Conference paper (MIA) In: CHI Conference on Human Factors in Computing Systems (CHI '21), ACM, 2021, ISBN: 978-1-4503-8096-6/21/05. @inproceedings{Arevalo-Arboleda2021,
title = {Assisting Manipulation and Grasping in Robot Teleoperation with Augmented Reality Visual Cues},
author = {Stephanie Arévalo-Arboleda and Franziska Ruecker and Tim Dierks and Jens Gerken},
url = {https://hci.w-hs.de/pub_VisualCuesCHI_compressed/, PDF Download},
doi = {10.1145/3411764.3445398},
isbn = {978-1-4503-8096-6/21/05},
year = {2021},
date = {2021-05-03},
booktitle = {CHI Conference on Human Factors in Computing Systems (CHI '21)},
publisher = {ACM},
abstract = {Teleoperating industrial manipulators in co-located spaces can be challenging. Facilitating robot teleoperation by providing additional visual information about the environment and the robot affordances using augmented reality (AR) can improve task performance in manipulation and grasping. In this paper, we present two designs of augmented visual cues that aim to enhance the visual space of the robot operator through hints about the position of the robot gripper in the workspace and in relation to the target. These visual cues aim to improve distance perception and, thus, task performance. We evaluate both designs against a baseline in an experiment where participants teleoperate a robotic arm to perform pick-and-place tasks. Our results show performance improvements at different levels, reflected in objective and subjective measures with trade-offs in terms of time, accuracy, and participants’ views of teleoperation. These findings show the potential of AR not only in teleoperation, but also in understanding the human-robot workspace.},
keywords = {augmented reality, depth perception, hands-free interaction, human-robot interaction, teleoperation, visual cues},
pubstate = {published},
tppubtype = {inproceedings}
}
Teleoperating industrial manipulators in co-located spaces can be challenging. Facilitating robot teleoperation by providing additional visual information about the environment and the robot affordances using augmented reality (AR) can improve task performance in manipulation and grasping. In this paper, we present two designs of augmented visual cues that aim to enhance the visual space of the robot operator through hints about the position of the robot gripper in the workspace and in relation to the target. These visual cues aim to improve distance perception and thus task performance. We evaluate both designs against a baseline in an experiment where participants teleoperate a robotic arm to perform pick-and-place tasks. Our results show performance improvements at different levels, reflected in objective and subjective measures, with trade-offs in terms of time, accuracy, and participants’ views of teleoperation. These findings show the potential of AR not only in teleoperation, but also in understanding the human-robot workspace. |
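The paper reports the cue designs and their evaluation, not an implementation listing. As a loose illustration of the general idea behind such a proximity cue, the following minimal Python sketch maps the gripper-to-target distance to an overlay colour; the function name, distance thresholds, and colour mapping are assumptions made for illustration, not the authors' design.

import numpy as np

def proximity_cue(gripper_pos, target_pos, near=0.05, far=0.30):
    """Map the gripper-target distance to a colour for an AR overlay.

    gripper_pos, target_pos: 3D positions in metres (hypothetical
    outputs of whatever tracking the AR system provides).
    Returns the distance and an RGB tuple fading green -> red.
    """
    d = float(np.linalg.norm(np.asarray(target_pos) - np.asarray(gripper_pos)))
    # Normalise the distance into [0, 1]: 0 = within grasping range, 1 = far away.
    t = min(max((d - near) / (far - near), 0.0), 1.0)
    colour = (t, 1.0 - t, 0.0)  # red channel grows with distance, green shrinks
    return d, colour

# Example: gripper 12 cm from the target yields a yellowish cue
print(proximity_cue((0.10, 0.00, 0.20), (0.10, 0.12, 0.20)))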
2020
|
Pascher, Max Praxisbeispiel Digitalisierung konkret: Wenn der Stromzähler weiß, ob es Oma gut geht. Beschreibung des minimalinvasiven Frühwarnsystems „ZELIA“ Buchkapitel In: Vilain, Michael (Hrsg.): Wege in die digitale Zukunft - Was bedeuten Smart Living, Big Data, Robotik & Co für die Sozialwirtschaft?, S. 137-148, Nomos Verlagsgesellschaft mbH & Co. KG, 2020, ISBN: 978-3-8487-6621-5. @inbook{Pascher2020,
title = {Praxisbeispiel Digitalisierung konkret: Wenn der Stromzähler weiß, ob es Oma gut geht. Beschreibung des minimalinvasiven Frühwarnsystems „ZELIA“},
author = {Max Pascher},
editor = {Michael Vilain},
url = {https://hci.w-hs.de/pub_digitalisierung_konkret-wenn_der_stromzaehler_weiss_ob_es_oma_gut_geht/, PDF Download},
doi = {10.5771/9783748907008-137},
isbn = {978-3-8487-6621-5},
year = {2020},
date = {2020-10-06},
booktitle = {Wege in die digitale Zukunft - Was bedeuten Smart Living, Big Data, Robotik & Co für die Sozialwirtschaft?},
pages = {137-148},
publisher = {Nomos Verlagsgesellschaft mbH & Co. KG},
keywords = {aal, consumption, data science, elderly, monitoring, wellbeing},
pubstate = {published},
tppubtype = {inbook}
}
|
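The chapter describes the ZELIA early-warning system conceptually and contains no code. Purely as a hypothetical sketch of the general idea, detecting unusual inactivity from smart-meter readings, one could imagine something like the following; the window length, threshold factor, and all names are assumptions, not taken from the chapter.

from statistics import mean

def inactivity_alert(hourly_kwh, baseline_kwh, window=6, factor=0.5):
    """Raise a (hypothetical) alert when recent consumption stays far
    below the person's usual consumption for the same hours of day,
    hinting at unusual inactivity.

    hourly_kwh:   most recent hourly meter readings, newest last
    baseline_kwh: typical consumption for the same hours of day
    """
    recent = hourly_kwh[-window:]
    expected = mean(baseline_kwh[-window:])
    return mean(recent) < factor * expected

# Example: a morning with almost no activity compared to the usual pattern
print(inactivity_alert([0.05] * 6, [0.4, 0.5, 0.6, 0.5, 0.4, 0.5]))  # True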
Arévalo-Arboleda, Stephanie; Pascher, Max; Lakhnati, Younes; Gerken, Jens Understanding Human-Robot Collaboration for People with Mobility Impairments at the Workplace, a Thematic Analysis Konferenzbeitrag MIA In: RO-MAN 2020 - IEEE International Conference on Robot and Human Interactive Communication, IEEE, 2020, ISBN: 978-1-7281-6075-7. @inproceedings{Arévalo-Arboleda2020b,
title = {Understanding Human-Robot Collaboration for People with Mobility Impairments at the Workplace, a Thematic Analysis},
author = {Stephanie Arévalo-Arboleda and Max Pascher and Younes Lakhnati and Jens Gerken},
url = {https://hci.w-hs.de/pub_understanding_hrc_ta/, PDF Download},
doi = {10.1109/RO-MAN47096.2020.9223489},
isbn = {978-1-7281-6075-7},
year = {2020},
date = {2020-07-31},
booktitle = {RO-MAN 2020 - IEEE International Conference on Robot and Human Interactive Communication},
publisher = {IEEE},
abstract = {Assistive technologies, in particular human-robot collaboration, have the potential to ease the life of people with physical mobility impairments in social and economic activities. Currently, this group of people has lower rates of economic participation due to the lack of adequate environments adapted to their capabilities. We take a closer look at the needs and preferences of people with physical mobility impairments in a human-robot cooperative environment at the workplace. Specifically, we aim to design how people with physical mobility impairments can control a robotic arm in manufacturing tasks. We present a case study of a sheltered workshop as a prototype for an institution that employs people with disabilities in manufacturing jobs. Here, we collected data of potential end-users with physical mobility impairments, social workers, and supervisors using a Participatory Design technique (Future-Workshop). These stakeholders were divided into two groups, end-users and supervising personnel (social workers, supervisors), and sessions were run separately for each group. The gathered information was analyzed using thematic analysis to reveal underlying themes across stakeholders. We identified concepts that highlight underlying concerns related to the robot fitting into the social and organizational structure, human-robot synergy, and human-robot problem management. In this paper, we present our findings and discuss the implications of each theme when shaping an inclusive human-robot cooperative workstation for people with physical mobility impairments.},
keywords = {assistive robotics, creating human-robot relationships, hri and collaboration in manufacturing environments},
pubstate = {published},
tppubtype = {inproceedings}
}
Assistive technologies, in particular human-robot collaboration, have the potential to ease the life of people with physical mobility impairments in social and economic activities. Currently, this group of people has lower rates of economic participation due to the lack of adequate environments adapted to their capabilities. We take a closer look at the needs and preferences of people with physical mobility impairments in a human-robot cooperative environment at the workplace. Specifically, we aim to design how people with physical mobility impairments can control a robotic arm in manufacturing tasks. We present a case study of a sheltered workshop as a prototype for an institution that employs people with disabilities in manufacturing jobs. Here, we collected data of potential end-users with physical mobility impairments, social workers, and supervisors using a Participatory Design technique (Future-Workshop). These stakeholders were divided into two groups, end-users and supervising personnel (social workers, supervisors), and sessions were run separately for each group. The gathered information was analyzed using thematic analysis to reveal underlying themes across stakeholders. We identified concepts that highlight underlying concerns related to the robot fitting into the social and organizational structure, human-robot synergy, and human-robot problem management. In this paper, we present our findings and discuss the implications of each theme when shaping an inclusive human-robot cooperative workstation for people with physical mobility impairments. |
Dierks, Tim Visual Cues: Integration of object pose recognition with an augmented reality system as means to support visual perception in human-robot control Abschlussarbeit MIA Westfälische Hochschule, Neidenburger Straße 43, 45897 Gelsenkirchen, 2020. @mastersthesis{Dierks2020,
title = {Visual Cues: Integration of object pose recognition with an augmented reality system as means to support visual perception in human-robot control},
author = {Tim Dierks},
url = {https://hci.w-hs.de/pub_dierks_tim_masterthesis/, PDF Download},
year = {2020},
date = {2020-05-28},
address = {Neidenburger Straße 43, 45897 Gelsenkirchen},
school = {Westfälische Hochschule},
abstract = {Autonomy and self-determination are fundamental aspects of living in our society. Supporting people for whom this freedom is limited due to physical impairments is the fundamental goal of this thesis. Especially for people who are paralyzed, even working at a desk job is often not feasible. Therefore, in this thesis a prototype of a robot assembly workstation was constructed that utilizes a modern Augmented Reality (AR) Head-Mounted Display (HMD) to control a robotic arm. Through the use of object pose recognition, the objects in the working environment are detected, and this information is used to display different visual cues at the robotic arm or in its vicinity, providing the users with additional depth information and helping them determine object relations that are often not easily discernible from a fixed perspective. To achieve this, a hands-free AR-based robot-control scheme was developed that uses speech and head-movement for interaction. Additionally, multiple advanced visual cues were designed that utilize object pose detection for spatial-visual support. The pose recognition system is adapted from state-of-the-art research in computer vision to allow the detection of arbitrary objects regardless of texture or shape. Two evaluations were performed. First, a small user study that excluded the object recognition confirmed the general usability of the system and gave an impression of its performance; the participants were able to perform difficult pick-and-place tasks with a high success rate. Second, a technical evaluation of the object recognition system revealed adequate prediction precision, but the system is too unreliable for real-world scenarios, as the prediction quality is highly variable and depends on object orientation and occlusion.},
keywords = {augmented reality, hands-free interaction, human-robot interaction, pose recognition},
pubstate = {published},
tppubtype = {mastersthesis}
}
Autonomy and self-determination are fundamental aspects of living in our society. Supporting people for whom this freedom is limited due to physical impairments is the fundamental goal of this thesis. Especially for people who are paralyzed, even working at a desk job is often not feasible. Therefore, in this thesis a prototype of a robot assembly workstation was constructed that utilizes a modern Augmented Reality (AR) Head-Mounted Display (HMD) to control a robotic arm. Through the use of object pose recognition, the objects in the working environment are detected, and this information is used to display different visual cues at the robotic arm or in its vicinity, providing the users with additional depth information and helping them determine object relations that are often not easily discernible from a fixed perspective. To achieve this, a hands-free AR-based robot-control scheme was developed that uses speech and head-movement for interaction. Additionally, multiple advanced visual cues were designed that utilize object pose detection for spatial-visual support. The pose recognition system is adapted from state-of-the-art research in computer vision to allow the detection of arbitrary objects regardless of texture or shape. Two evaluations were performed. First, a small user study that excluded the object recognition confirmed the general usability of the system and gave an impression of its performance; the participants were able to perform difficult pick-and-place tasks with a high success rate. Second, a technical evaluation of the object recognition system revealed adequate prediction precision, but the system is too unreliable for real-world scenarios, as the prediction quality is highly variable and depends on object orientation and occlusion. |
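The thesis presents its hands-free control scheme in prose only. The following is a rough, hypothetical Python sketch of one way head rotation could be mapped to an end-effector velocity, with a deadzone against natural head tremor; the axes, gains, and function names are assumptions for illustration, not the thesis' implementation.

import numpy as np

def head_to_velocity(yaw, pitch, deadzone=0.08, gain=0.15):
    """Map head yaw/pitch (radians, relative to a calibrated neutral
    pose) to a planar end-effector velocity in m/s. The deadzone
    suppresses drift from small involuntary head movements."""
    def scaled(angle):
        if abs(angle) < deadzone:
            return 0.0
        return gain * (angle - np.sign(angle) * deadzone)
    return np.array([scaled(yaw), scaled(pitch)])  # (vx, vy)

# Looking slightly right and up moves the gripper right and up; speech
# commands (e.g. "grip", "release") would trigger discrete actions separately.
print(head_to_velocity(0.20, 0.12))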
Ruecker, Franziska Visuelle Helfer: Ein Augmented Reality Prototyp zur Unterstützung der visuellen Wahrnehmung für die Steuerung eines Roboterarms Abschlussarbeit MIA Westfälische Hochschule, Neidenburger Straße 43, 45897 Gelsenkirchen, 2020. @mastersthesis{Ruecker2020,
title = {Visuelle Helfer: Ein Augmented Reality Prototyp zur Unterstützung der visuellen Wahrnehmung für die Steuerung eines Roboterarms},
author = {Franziska Ruecker},
url = {https://hci.w-hs.de/pub_rueckermasterarbeit_komprimiert/, PDF Download},
year = {2020},
date = {2020-05-14},
address = {Neidenburger Straße 43, 45897 Gelsenkirchen},
school = {Westfälische Hochschule},
abstract = {Physical disabilities can restrict a person to the point where an autonomous and self-determined life is no longer possible for them, despite intact mental and cognitive abilities. For people who are paralyzed from the neck down, so-called tetraplegics, every bit of regained autonomy therefore increases their quality of life. This master's thesis develops an Augmented Reality prototype that allows tetraplegics, or people with similar physical impairments, to perform assembly tasks at a human-robot workstation and can thus enable their integration into working life. The prototype lets users control a Kuka iiwa robotic arm with the Microsoft HoloLens without using their hands. A particular focus is placed on enriching the user's field of view with dedicated virtual visualizations, so-called visual helpers, to compensate for the disadvantages caused by the target group's restricted mobility. These visual helpers are intended to support the control of the robotic arm and to improve the operation of the prototype. An evaluation of the prototype showed tendencies that the concept of visual helpers lets users control the robotic arm more precisely and supports its operation.},
keywords = {augmented reality, evaluation, hands-free interaction, human-robot interaction},
pubstate = {published},
tppubtype = {mastersthesis}
}
Physical disabilities can restrict a person to the point where an autonomous and self-determined life is no longer possible for them, despite intact mental and cognitive abilities. For people who are paralyzed from the neck down, so-called tetraplegics, every bit of regained autonomy therefore increases their quality of life. This master's thesis develops an Augmented Reality prototype that allows tetraplegics, or people with similar physical impairments, to perform assembly tasks at a human-robot workstation and can thus enable their integration into working life. The prototype lets users control a Kuka iiwa robotic arm with the Microsoft HoloLens without using their hands. A particular focus is placed on enriching the user's field of view with dedicated virtual visualizations, so-called visual helpers, to compensate for the disadvantages caused by the target group's restricted mobility. These visual helpers are intended to support the control of the robotic arm and to improve the operation of the prototype. An evaluation of the prototype showed tendencies that the concept of visual helpers lets users control the robotic arm more precisely and supports its operation. |
Arévalo-Arboleda, Stephanie; Dierks, Tim; Ruecker, Franziska; Gerken, Jens There’s More than Meets the Eye: Enhancing Robot Control through Augmented Visual Cues Konferenzbeitrag MIA In: HRI 2020 - ACM/IEEE International Conference on Human-Robot Interaction, 2020, ISBN: 978-1-4503-7057. @inproceedings{Arévalo-Arboleda2020,
title = {There’s More than Meets the Eye: Enhancing Robot Control through Augmented Visual Cues},
author = {Stephanie Arévalo-Arboleda and Tim Dierks and Franziska Ruecker and Jens Gerken},
url = {https://hci.w-hs.de/pub_lbr1017_visualcues_arevalo_cameraready/, PDF Download},
doi = {10.1145/3371382.3378240},
isbn = {978-1-4503-7057},
year = {2020},
date = {2020-03-23},
booktitle = {HRI 2020 - ACM/IEEE International Conference on Human-Robot Interaction},
abstract = {In this paper, we present the design of a visual feedback mechanism using Augmented Reality, which we call augmented visual cues, to assist pick-and-place tasks during robot control. We propose to augment the robot operator’s visual space in order to avoid attention splitting and increase situational awareness (SA). In particular, we aim to improve on the SA concepts of perception, comprehension, and projection as well as the overall task performance. For that, we built upon the interaction design paradigm proposed by Walker et al. On the one hand, our design augments the robot to support picking tasks; on the other hand, we augment the environment to support placing tasks. We evaluated our design in a first user study, and results point to specific design aspects that need improvement while showing promise for the overall approach, in particular regarding user satisfaction and certain SA concepts.},
keywords = {augmented reality, human-robot interaction, visualization},
pubstate = {published},
tppubtype = {inproceedings}
}
In this paper, we present the design of a visual feedback mechanism using Augmented Reality, which we call augmented visual cues, to assist pick-and-place tasks during robot control. We propose to augment the robot operator’s visual space in order to avoid attention splitting and increase situational awareness (SA). In particular, we aim to improve on the SA concepts of perception, comprehension, and projection as well as the overall task performance. For that, we built upon the interaction design paradigm proposed by Walker et al. On the one hand, our design augments the robot to support picking tasks; on the other hand, we augment the environment to support placing tasks. We evaluated our design in a first user study, and results point to specific design aspects that need improvement while showing promise for the overall approach, in particular regarding user satisfaction and certain SA concepts.