2021
Arévalo-Arboleda, Stephanie; Pascher, Max; Baumeister, Annalies; Klein, Barbara; Gerken, Jens: Reflecting upon Participatory Design in Human-Robot Collaboration for People with Motor Disabilities: Challenges and Lessons Learned from Three Multiyear Projects. In: The 14th PErvasive Technologies Related to Assistive Environments Conference - PETRA 2021, ACM, Forthcoming, ISBN: 978-1-4503-8792-7/21/06.
@inproceedings{Arévalo-Arboleda2021b,
title = {Reflecting upon Participatory Design in Human-Robot Collaboration for People with Motor Disabilities: Challenges and Lessons Learned from Three Multiyear Projects},
author = {Stephanie Arévalo-Arboleda and Max Pascher and Annalies Baumeister and Barbara Klein and Jens Gerken},
doi = {10.1145/3453892.3458044},
isbn = {978-1-4503-8792-7/21/06},
year = {2021},
date = {2021-06-29},
booktitle = {The 14th PErvasive Technologies Related to Assistive Environments Conference - PETRA 2021},
organization = {ACM},
abstract = {Human-robot technology has the potential to positively impact the lives of people with motor disabilities. However, current efforts have mostly been oriented towards technology (sensors, devices, modalities, interaction techniques), thus relegating the user and their valuable input to the wayside. In this paper, we aim to present a holistic perspective of the role of participatory design in Human-Robot Collaboration (HRC) for People with Motor Disabilities (PWMD). We have been involved in several multiyear projects related to HRC for PWMD, where we encountered different challenges related to planning and participation, preferences of stakeholders, using certain participatory design techniques, technology exposure, as well as ethical, legal, and social implications. These challenges helped us provide five lessons learned that could serve as a guideline to researchers when using participatory design with vulnerable groups, in particular young researchers who are starting to explore HRC research for people with disabilities.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
Arévalo-Arboleda, Stephanie; Ruecker, Franziska; Dierks, Tim; Gerken, Jens: Assisting Manipulation and Grasping in Robot Teleoperation with Augmented Reality Visual Cues. In: CHI Conference on Human Factors in Computing Systems (CHI '21), ACM, Forthcoming, ISBN: 978-1-4503-8096-6/21/05.
@inproceedings{Arevalo-Arboleda2021,
title = {Assisting Manipulation and Grasping in Robot Teleoperation with Augmented Reality Visual Cues},
author = {Stephanie Arévalo-Arboleda and Franziska Ruecker and Tim Dierks and Jens Gerken},
url = {https://hci.w-hs.de/pub_VisualCuesCHI_compressed/, PDF Download},
doi = {10.1145/3411764.3445398},
isbn = {978-1-4503-8096-6/21/05},
year = {2021},
date = {2021-05-03},
booktitle = {CHI Conference on Human Factors in Computing Systems (CHI '21)},
publisher = {ACM},
abstract = {Teleoperating industrial manipulators in co-located spaces can be challenging. Facilitating robot teleoperation by providing additional visual information about the environment and the robot affordances using augmented reality (AR) can improve task performance in manipulation and grasping. In this paper, we present two designs of augmented visual cues that aim to enhance the visual space of the robot operator through hints about the position of the robot gripper in the workspace and in relation to the target. These visual cues aim to improve distance perception and, thus, task performance. We evaluate both designs against a baseline in an experiment where participants teleoperate a robotic arm to perform pick-and-place tasks. Our results show performance improvements at different levels, reflected in objective and subjective measures, with trade-offs in terms of time, accuracy, and participants’ views of teleoperation. These findings show the potential of AR not only in teleoperation, but also in understanding the human-robot workspace.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
2020
Arévalo-Arboleda, Stephanie; Pascher, Max; Lakhnati, Younes; Gerken, Jens: Understanding Human-Robot Collaboration for People with Mobility Impairments at the Workplace, a Thematic Analysis. In: RO-MAN 2020 - IEEE International Conference on Robot and Human Interactive Communication, IEEE, 2020, ISBN: 978-1-7281-6075-7.
@inproceedings{Arévalo-Arboleda2020b,
title = {Understanding Human-Robot Collaboration for People with Mobility Impairments at the Workplace, a Thematic Analysis},
author = {Stephanie Arévalo-Arboleda and Max Pascher and Younes Lakhnati and Jens Gerken},
url = {https://hci.w-hs.de/pub_understanding_hrc_ta/, PDF Download},
doi = {10.1109/RO-MAN47096.2020.9223489},
isbn = {978-1-7281-6075-7},
year = {2020},
date = {2020-07-31},
booktitle = {RO-MAN 2020 - IEEE International Conference on Robot and Human Interactive Communication},
publisher = {IEEE},
abstract = {Assistive technologies, in particular human-robot collaboration, have the potential to ease the life of people with physical mobility impairments in social and economic activities. Currently, this group of people has lower rates of economic participation, due to the lack of adequate environments adapted to their capabilities. We take a closer look at the needs and preferences of people with physical mobility impairments in a human-robot cooperative environment at the workplace. Specifically, we aim to design how to control a robotic arm in manufacturing tasks for people with physical mobility impairments. We present a case study of a sheltered workshop as a prototype for an institution that employs people with disabilities in manufacturing jobs. Here, we collected data of potential end-users with physical mobility impairments, social workers, and supervisors using a Participatory Design technique (Future-Workshop). These stakeholders were divided into two groups, end-users and supervising personnel (social workers, supervisors), which were run across two separate sessions. The gathered information was analyzed using thematic analysis to reveal underlying themes across stakeholders. We identified concepts that highlight underlying concerns related to the robot fitting into the social and organizational structure, human-robot synergy, and human-robot problem management. In this paper, we present our findings and discuss the implications of each theme when shaping an inclusive human-robot cooperative workstation for people with physical mobility impairments.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Arévalo-Arboleda, Stephanie; Dierks, Tim; Ruecker, Franziska; Gerken, Jens: There’s More than Meets the Eye: Enhancing Robot Control through Augmented Visual Cues. In: HRI 2020 - ACM/IEEE International Conference on Human-Robot Interaction, 2020, ISBN: 978-1-4503-7057.
@inproceedings{Arévalo-Arboleda2020,
title = {There’s More than Meets the Eye: Enhancing Robot Control through Augmented Visual Cues},
author = {Stephanie Arévalo-Arboleda and Tim Dierks and Franziska Ruecker and Jens Gerken},
url = {https://hci.w-hs.de/pub_lbr1017_visualcues_arevalo_cameraready/, PDF Download},
doi = {10.1145/3371382.3378240},
isbn = {978-1-4503-7057},
year = {2020},
date = {2020-03-23},
booktitle = {HRI 2020 - ACM/IEEE International Conference on Human-Robot Interaction},
abstract = {In this paper, we present the design of a visual feedback mechanism using Augmented Reality, which we call augmented visual cues, to assist pick-and-place tasks during robot control. We propose to augment the robot operator’s visual space in order to avoid attention splitting and increase situational awareness (SA). In particular, we aim to improve on the SA concepts of perception, comprehension, and projection as well as the overall task performance. For that, we built upon the interaction design paradigm proposed by Walker et al. On the one hand, our design augments the robot to support picking tasks; on the other hand, we augment the environment to support placing tasks. We evaluated our design in a first user study, and results point to specific design aspects that need improvement while showing promise for the overall approach, in particular regarding user satisfaction and certain SA concepts.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2019
Arévalo-Arboleda, Stephanie; Miller, Stanislaw; Janka, Martha; Gerken, Jens: What's behind a choice? Understanding Modality Choices under Changing Environmental Conditions. In: ICMI '19 2019 International Conference on Multimodal Interaction, pp. 291-301, 2019, ISBN: 978-1-4503-6860-5.
@inproceedings{Arévalo-Arboleda2019,
title = {What's behind a choice? Understanding Modality Choices under Changing Environmental Conditions},
author = {Stephanie Arévalo-Arboleda and Stanislaw Miller and Martha Janka and Jens Gerken},
url = {https://hci.w-hs.de/pub_whatsbehindachoiceunderstandingmodalitychoicesunderchangingenvironmentalconditions/, PDF Download},
doi = {10.1145/3340555.3353717},
isbn = {978-1-4503-6860-5},
year = {2019},
date = {2019-10-14},
booktitle = {ICMI '19 2019 International Conference on Multimodal Interaction},
pages = {291-301},
abstract = {Interacting with the physical and digital environment multimodally enhances user flexibility and adaptability to different scenarios. A body of research has focused on comparing the efficiency and effectiveness of different interaction modalities in digital environments. However, little is known about user behavior in an environment that provides freedom to choose from a range of modalities. That is why we take a closer look at the factors that influence input modality choices. Building on the work by Jameson & Kristensson, our goal is to understand how different factors influence user choices. In this paper, we present a study that aims to explore modality choices in a hands-free interaction environment, wherein participants can choose and freely combine three hands-free modalities (Gaze, Head movements, Speech) to execute point-and-select actions in a 2D interface. On the one hand, our results show that users avoid switching modalities more often than we expected, particularly under conditions that should prompt modality switching. On the other hand, when users make a modality switch, user characteristics and consequences of the experienced interaction have a higher impact on the choice than the changes in environmental conditions. Further, when users switch between modalities, we identified different types of switching behaviors: users who deliberately try to find and choose an optimal modality (single switcher), users who try to find optimal combinations of modalities (multiple switcher), and users whose switching is triggered by error occurrence (error-biased switcher). We believe that these results help to further understand when and how to design for multimodal interaction in real-world systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2018
Arévalo-Arboleda, Stephanie; Pascher, Max; Gerken, Jens: Opportunities and Challenges in Mixed-Reality for an Inclusive Human-Robot Collaboration Environment. In: Proceedings of the 2018 International Workshop on Virtual, Augmented, and Mixed Reality for Human-Robot Interactions (VAM-HRI) as part of the ACM/IEEE Conference on Human-Robot Interaction, pp. 83–86, Chicago, USA, 2018.
@inproceedings{Arboleda2018,
title = {Opportunities and Challenges in Mixed-Reality for an Inclusive Human-Robot Collaboration Environment},
author = {Stephanie Arévalo-Arboleda and Max Pascher and Jens Gerken},
url = {https://hci.w-hs.de/pub_opportunities_and_challenges_in_mixed-reality_for_an_inclusive_human-robot_collaboration_environment/, PDF Download},
year = {2018},
date = {2018-01-01},
booktitle = {Proceedings of the 2018 International Workshop on Virtual, Augmented, and Mixed Reality for Human-Robot Interactions (VAM-HRI) as part of the ACM/IEEE Conference on Human-Robot Interaction},
pages = {83--86},
address = {Chicago, USA},
abstract = {This paper presents an approach to enhance robot control using Mixed-Reality. It highlights the opportunities and challenges in interaction design to achieve a Human-Robot Collaborative environment. In fact, Human-Robot Collaboration is the perfect space for social inclusion: it enables people with severe physical impairments to interact with the environment by providing them with movement control of an external robotic arm. When discussing robot control, it is important to reduce the visual split that different input and output modalities carry. Therefore, Mixed-Reality is of particular interest when trying to ease communication between humans and robotic systems.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}