This website uses cookies that store information about your usage of the page. By continuing to use this page you confirm you are happy with that.
Review and change how cookies are used.
Patrick has authored 57 peer-reviewed scientific papers with more than 120 co-authors, comprising 9 journal articles, 39 conference papers, and 9 workshop contributions. According to Google Scholar (23 October 2024), his work has been cited 451 times (h-index: 12).
@article{Ayub2024a,
  author  = {Ayub, Ali and De Francesco, Zachary and Holthaus, Patrick and Nehaniv, Chrystopher L. and Dautenhahn, Kerstin},
  title   = {{Continual Learning through Human-Robot Interaction -- Human Perceptions of a Continual Learning Robot in Repeated Interactions}},
  journal = {International Journal of Social Robotics},
  note    = {Under review},
  doi     = {10.48550/arXiv.2305.16332},
}
@article{Riches2024a,
  author  = {Riches, Lewis and Koay, Kheng Lee and Holthaus, Patrick},
  title   = {{Privacy in Robotics: Investigating the Acceptability of Personal Data Knowledge and Perceived Risk of Disclosure}},
  journal = {International Journal of Social Robotics},
  note    = {Under review},
}
@article{Ayub2024b,
  author     = {Ayub, Ali and De Francesco, Zachary and Mehta, Jainish and Agha, Khaled Yaakoub and Holthaus, Patrick and Nehaniv, Chrystopher L. and Dautenhahn, Kerstin},
  title      = {{A Human-Centered View of Continual Learning: Understanding Interactions, Teaching Patterns, and Perceptions of Human Users Towards a Continual Learning Robot in Repeated Interactions}},
  journal    = {ACM Transactions on Human-Robot Interaction},
  year       = {2024},
  volume     = {13},
  number     = {4},
  pages      = {1--39},
  publisher  = {Association for Computing Machinery},
  doi        = {10.1145/3659110},
  abstract   = {Continual learning (CL) has emerged as an important avenue of research in recent years, at the intersection of Machine Learning (ML) and Human-Robot Interaction (HRI), to allow robots to continually learn in their environments over long-term interactions with humans. Most research in continual learning, however, has been robot-centered to develop continual learning algorithms that can quickly learn new information on systematically collected static datasets. In this paper, we take a human-centered approach to continual learning, to understand how humans interact with, teach, and perceive continual learning robots over the long term, and if there are variations in their teaching styles. We developed a socially guided continual learning system that integrates CL models for object recognition with a mobile manipulator robot and allows humans to directly teach and test the robot in real time over multiple sessions. We conducted an in-person study with 60 participants who interacted with the continual learning robot in 300 sessions with 5 sessions per participant. In this between-participant study, we used three different CL models deployed on a mobile manipulator robot. An extensive qualitative and quantitative analysis of the data collected in the study shows that there is significant variation among the teaching styles of individual users indicating the need for personalized adaptation to their distinct teaching styles. Our analysis shows that the constrained experimental setups that have been widely used to test most CL models are not adequate, as real users interact with and teach continual learning robots in a variety of ways. Finally, our analysis shows that although users have concerns about continual learning robots being deployed in our daily lives, they mention that with further improvements continual learning robots could assist older adults and people with disabilities in their homes.},
  annotation = {selected paper},
}
@article{Foerster2023a,
  author   = {Förster, Frank and Romeo, Marta and Holthaus, Patrick and Wood, Luke and Dondrup, Christian and Fischer, Joel E. and Ferdousi Liza, Farhana and Kaszuba, Sara and Hough, Julian and Nesset, Birthe and Hernández García, Daniel and Kontogiorgios, Dimosthenis and Williams, Jennifer and Özkan, Elif Ecem and Barnard, Pepita and Berumen, Gustavo and Price, Dominic and Cobb, Sue and Witschko, Martina and Tisserand, Lucien and Porcheron, Martin and Giuliani, Manuel and Skantze, Gabriel and Healey, Patrick and Papaioannou, Ioannis and Gkatzia, Dimitra and Albert, Saul and Huang, Guanyu and Maraev, Vladislav and Epaminondas, Kapetanios},
  title    = {Working with Troubles and Failures in Conversation between Humans and Robots: Workshop Report},
  journal  = {Frontiers in Robotics and AI},
  year     = {2023},
  volume   = {10},
  doi      = {10.3389/frobt.2023.1202306},
  abstract = {This paper summarizes the structure and findings from the first Workshop on Troubles and Failures in Conversations between Humans and Robots. The workshop was organized to bring together a small, interdisciplinary group of researchers working on miscommunication from two complementary perspectives. One group of technology-oriented researchers was made up of roboticists, Human-Robot Interaction (HRI) researchers and dialogue system experts. The second group involved experts from conversation analysis, cognitive science, and linguistics. Uniting both groups of researchers is the belief that communication failures between humans and machines need to be taken seriously and that a systematic analysis of such failures may open fruitful avenues in research beyond current practices to improve such systems, including both speech-centric and multimodal interfaces. This workshop represents a starting point for this endeavour. The aim of the workshop was threefold:Firstly, to establish an interdisciplinary network of researchers that share a common interest in investigating communicative failures with a particular view towards robotic speech interfaces; secondly, to gain a partial overview of the "failure landscape" as experienced by roboticists and HRI researchers; and thirdly, to determine the potential for creating a robotic benchmark scenario for testing future speech interfaces with respect to the identified failures. The present article summarizes both the "failure landscape" surveyed during the workshop as well as the outcomes of the attempt to define a benchmark scenario.},
}
@incollection{Menon2023,
  author    = {Menon, Catherine and Carta, Silvio and Förster, Frank and Holthaus, Patrick},
  editor    = {Holzinger, Andreas and Plácido da Silva, Hugo and Vanderdonckt, Jean and Constantine, Larry},
  title     = {{Improving Public Engagement with Ethical Complexities of Assistive Robots}},
  booktitle = {Computer-Human Interaction Research and Applications 2021 and 2022, revised selected papers},
  series    = {Communications in Computer and Information Science},
  volume    = {1882},
  year      = {2023},
  pages     = {71--94},
  publisher = {Springer Cham},
  doi       = {10.1007/978-3-031-41962-1_4},
}
@article{Holthaus2021a,
  author     = {Holthaus, Patrick and Wachsmuth, Sven},
  title      = {{It was a Pleasure Meeting You - Towards a Holistic Model of Human-Robot Encounters}},
  journal    = {International Journal of Social Robotics},
  year       = {2021},
  volume     = {13},
  number     = {7},
  pages      = {1729--1745},
  issn       = {1875-4805},
  doi        = {10.1007/s12369-021-00759-9},
  abstract   = {Social signals are commonly used to facilitate the usability of humanoid robots. While providing the robot with an extended expressibility, these signals are often applied only in structured interactions where parts of the familiarization or farewell processes are disregarded in the evaluation. In order to establish the consideration of a more comprehensive view, this article presents a holistic model of human encounters with a social robot. We propose and discuss particular robot signals, which aim to express the robot's social awareness, for each of the model's phases. We present an interaction study with humans that are inexperienced in interacting with robots to investigate the effects of these signals. Results verify that the implementation of proposed signals is beneficial for the participants' user experience. The study further reveals a strong interdependency of a robot's social signals and the importance of addressing entire encounters in human-robot interaction studies.},
  annotation = {selected paper},
}
@article{Schulz2021,
  author   = {Schulz, Trenton and Soma, Rebekka and Holthaus, Patrick},
  title    = {{Movement Acts in Breakdown Situations - How a Robot's Recovery Procedure Affects Participants' Opinions}},
  journal  = {Paladyn, Journal of Behavioral Robotics: Special Issue Trust, Acceptance and Social Cues in Robot Interaction},
  year     = {2021},
  volume   = {12},
  number   = {1},
  pages    = {336--355},
  doi      = {10.1515/pjbr-2021-0027},
  abstract = {Recovery procedures are targeted at correcting issues encountered by robots. What are people's opinions of a robot during these recovery procedures? During an experiment that examined how a mobile robot moved, the robot would unexpectedly pause or rotate itself to recover from a navigation problem. The serendipity of the recovery procedure and people's understanding of it became a case study to examine how future study designs could consider breakdowns better and look at suggestions for better robot behaviors in such situations. We present the original experiment with the recovery procedure. We then examine the responses from the participants in this experiment qualitatively to see how they interpreted the breakdown situation when it occurred. Responses could be grouped into themes of sentience, competence, and the robot's forms. The themes indicate that the robot's movement communicated different information to different participants. This leads us to introduce the concept of movement acts to help examine the explicit and implicit parts of communication in movement. Given that we developed the concept looking at an unexpected breakdown, we suggest that researchers should plan for the possibility of breakdowns in experiments and examine and report people's experience around a robot breakdown to further explore unintended robot communication.},
}
@article{Wrede2017,
  author   = {Wrede, Sebastian and Leichsenring, Christian and Holthaus, Patrick and Hermann, Thomas and Wachsmuth, Sven},
  title    = {{The Cognitive Service Robotics Apartment - A Versatile Environment for Human-Machine Interaction Research}},
  journal  = {KI - Künstliche Intelligenz: Special Issue on Smart Environments},
  year     = {2017},
  volume   = {31},
  number   = {3},
  pages    = {299--304},
  doi      = {10.1007/s13218-017-0492-x},
  abstract = {The emergence of cognitive interaction technology offering intuitive and personalized support for humans in daily routines is essential for the success of future smart environments. Social robotics and ambient assisted living are well-established, active research fields but in the real world the number of smart environments that support humans efficiently on a daily basis is still rather low. We argue that research on ambient intelligence and human-robot interaction needs to be conducted in a strongly interdisciplinary process to facilitate seamless integration of assistance technologies into the users' daily lives. With the Cognitive Service Robotics Apartment (CSRA), we are developing a novel kind of laboratory following this interdisciplinary approach. It combines a smart home with ambient intelligence functionalities with a cognitive social robot with advanced manipulation capabilities to explore the all day use of cognitive interaction technology for human assistance. This lab in conjunction with our development approach opens up new lines of inquiry and allows to address new research questions in human machine, -agent and -robot interaction.},
}
@article{Holthaus2011,
  author   = {Holthaus, Patrick and Pitsch, Karola and Wachsmuth, Sven},
  title    = {{How Can I Help? - Spatial Attention Strategies for a Receptionist Robot}},
  journal  = {International Journal of Social Robotics},
  year     = {2011},
  volume   = {3},
  number   = {4},
  pages    = {383--393},
  issn     = {1875-4791},
  doi      = {10.1007/s12369-011-0108-9},
  abstract = {Social interaction between humans takes place in the spatial environment on a daily basis. We occupy space for ourselves and respect the dynamics of spaces that are occupied by others. In human-robot interaction, spatial models are commonly used for structuring relatively far-away interactions or passing-by scenarios. This work instead, focuses on the transition between distant and close communication for an interaction opening. We applied a spatial model to a humanoid robot and implemented an attention system that is connected to it. The resulting behaviors have been verified in an online video study. The questionnaire revealed that these behaviors are applicable and result in a robot that has been perceived as more interested in the human and shows its attention and intentions earlier and to a higher degree than other strategies.},
}
@inproceedings{Kadam2024,
  author    = {Kadam, Pushkar and Fang, Gu and Amirabdollahian, Farshid and Zou, Ju Jia and Holthaus, Patrick},
  title     = {{Hand Pose Detection Using YOLOv8-pose}},
  booktitle = {Conference on Engineering Informatics (ICEI 2024)},
  year      = {2024},
  note      = {In press},
  address   = {Melbourne, Australia},
  publisher = {IEEE},
}
@inproceedings{Marvel2024a,
  author    = {Marvel, Jeremy and Virts, Ann and Bagchi, Shelly and Shrestha, Snehesh and Holthaus, Patrick and Senft, Emmanuel and Hernandez Garcia, Daniel},
  title     = {{The Road to Reliable Robots: Interpretable, Accessible, and Reproducible HRI Research}},
  booktitle = {International Conference on Human-Robot Interaction (HRI 2025)},
  year      = {2025},
  note      = {In press},
  address   = {Melbourne, Australia},
  publisher = {ACM},
}
@inproceedings{Rafique2024b,
  author    = {Rafique, Sehrish and Amirabdollahian, Farshid and Fang, Gu and Holthaus, Patrick},
  title     = {{Human Presence Detection to Support Contextual Awareness in Ambient Assisted Living Scenarios}},
  booktitle = {2024 IEEE International Conference on Metrology for eXtended Reality, Artificial Intelligence and Neural Engineering (MetroXRAINE)},
  year      = {2024},
  note      = {In press},
  address   = {St. Albans, UK},
  publisher = {IEEE},
}
@inproceedings{Bamorovat2024b,
  author    = {Bamorovat Abadi, Mohammad Hossein and Shahabian Alashti, Mohamad Reza and Menon, Catherine and Holthaus, Patrick and Amirabdollahian, Farshid},
  title     = {{Robotic Vision and Multi-View Synergy: Action and activity recognition in assisted living scenarios}},
  booktitle = {International Conference on Biomedical Robotics and Biomechatronics (BioRob 2024)},
  year      = {2024},
  pages     = {789--794},
  address   = {Heidelberg, Germany},
  publisher = {IEEE RAS/EMBS},
  doi       = {10.1109/BioRob60516.2024.10719749},
}
@inproceedings{DeLima2024a,
  author    = {De Lima, Carlos Baptista and Hough, Julian and Förster, Frank and Holthaus, Patrick and Zheng, Yongjun},
  title     = {{Improving Fluidity Through Action: A Proposal for a Virtual Reality Platform for Improving Real-World HRI}},
  booktitle = {International Conference on Human-Agent Interaction (HAI 2024)},
  year      = {2024},
  address   = {Swansea, UK},
  publisher = {ACM},
  doi       = {10.1145/3687272.3690881},
  abstract  = {Achieving truly fluid interaction with robots with speech interfaces remains a hard problem. Despite technical advances in sensors, processors and actuators, the experience of Human-Robot Interaction (HRI) remains laboured and frustrating. Some of the barriers to this stem from a lack of a suitable development platform for HRI to improve the interaction, particularly for mobile manipulator robots. In this paper we briefly overview some existing systems and propose a high-fidelity Virtual Reality (VR) HRI simulation environment with Wizard-of-Oz (WoZ) capability applicable to multiple robots including mobile manipulators and social robots.},
}
@inproceedings{Helal2024,
  author    = {Helal, Manal and Holthaus, Patrick and Wood, Luke and Velmurugan, Vignesh and Lakatos, Gabriella and Moros, Sílvia and Amirabdollahian, Farshid},
  title     = {{When the robotic Maths tutor is wrong - can children identify mistakes generated by ChatGPT?}},
  booktitle = {International Conference on Artificial Intelligence, Robotics and Control (AIRC 2024)},
  year      = {2024},
  pages     = {83--90},
  address   = {Cairo, Egypt},
  publisher = {IEEE},
  doi       = {10.1109/AIRC61399.2024.10672220},
}
@inproceedings{Holthaus2024a,
  author    = {Holthaus, Patrick and Fallahi, Ali and Förster, Frank and Menon, Catherine and Wood, Luke and Lakatos, Gabriella},
  title     = {{Agency Effects on Robot Trust in Different Age Groups}},
  booktitle = {International Conference on Human-Agent Interaction (HAI 2024)},
  year      = {2024},
  address   = {Swansea, UK},
  publisher = {ACM},
  doi       = {10.1145/3687272.3690903},
  abstract  = {Trust plays a major role when introducing interactive robots into people's personal spaces, which, in large part, depends on how they perceive the robot. This paper presents the initial results of an investigation into the perception of robot agency as a potential factor influencing trust. We manipulated a robot's agency to see how trust would change as a result. Our preliminary results indicate age as a confounding factor while we did not find differences when priming robot autonomy.},
}
@inproceedings{Hough2024b,
  author    = {Hough, Julian and De Lima, Carlos Baptista and Förster, Frank and Holthaus, Patrick and Zheng, Yongjun},
  title     = {{HAI 2024 Workshop Proposal: Fluidity in Human-Agent Interaction}},
  booktitle = {International Conference on Human-Agent Interaction (HAI 2024)},
  year      = {2024},
  address   = {Swansea, UK},
  publisher = {ACM},
  doi       = {10.1145/3687272.3691361},
}
@inproceedings{Rafique2024a,
  author    = {Rafique, Sehrish and Amirabdollahian, Farshid and Arunachalam, Ganesh and Holthaus, Patrick},
  title     = {{Opportunities and Challenges in Implementing a Virtual Ward for Heart Failure Management}},
  booktitle = {The Seventeenth International Conference on Advances in Computer-Human Interactions (ACHI 2024)},
  year      = {2024},
  pages     = {88--93},
  address   = {Barcelona, Spain},
  publisher = {IARIA},
  isbn      = {978-1-68558-163-3},
  url       = {https://www.thinkmind.org/index.php?view=article&articleid=achi_2024_3_50_20035},
}
@inproceedings{Riches2024b,
  author    = {Riches, Lewis and Koay, Kheng Lee and Holthaus, Patrick},
  title     = {{Evaluating the Impact of a Personal Data Communication Policy in Human-Robot Interactions}},
  booktitle = {The Seventeenth International Conference on Advances in Computer-Human Interactions (ACHI 2024)},
  year      = {2024},
  pages     = {123--128},
  address   = {Barcelona, Spain},
  publisher = {IARIA},
  isbn      = {978-1-68558-163-3},
  url       = {https://www.thinkmind.org/index.php?view=article&articleid=achi_2024_3_100_20052},
}
@inproceedings{Shahabian2024,
  author    = {Shahabian Alashti, Mohamad Reza and Bamorovat Abadi, Mohammad Hossein and Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid},
  title     = {{Efficient Skeleton-based Human Activity Recognition in Ambient Assisted Living Scenarios with Multi-view CNN}},
  booktitle = {International Conference on Biomedical Robotics and Biomechatronics (BioRob 2024)},
  year      = {2024},
  pages     = {979--984},
  address   = {Heidelberg, Germany},
  publisher = {IEEE RAS/EMBS},
  doi       = {10.1109/BioRob60516.2024.10719939},
}
@inproceedings{Ayub2023b,
  author    = {Ayub, Ali and Mehta, Jainish and De Francesco, Zachary and Holthaus, Patrick and Dautenhahn, Kerstin and Nehaniv, Chrystopher L.},
  title     = {{How do Human Users Teach a Continual Learning Robot in Repeated Interactions?}},
  booktitle = {International Conference on Robot and Human Interactive Communication (RO-MAN 2023)},
  year      = {2023},
  pages     = {1975--1982},
  address   = {Busan, Korea},
  publisher = {IEEE},
  doi       = {10.1109/RO-MAN57019.2023.10309520},
  abstract  = {Continual learning (CL) has emerged as an important avenue of research in recent years, at the intersection of Machine Learning (ML) and Human-Robot Interaction (HRI), to allow robots to continually learn in their environments over long-term interactions with humans. Most research in continual learning, however, has been robot-centered to develop continual learning algorithms that can quickly learn new information on static datasets. In this paper, we take a human-centered approach to continual learning, to understand how humans teach continual learning robots over the long term and if there are variations in their teaching styles. We conducted an in-person study with 40 participants that interacted with a continual learning robot in 200 sessions. In this between-participant study, we used two different CL models deployed on a Fetch mobile manipulator robot. An extensive qualitative and quantitative analysis of the data collected in the study shows that there is significant variation among the teaching styles of individual users indicating the need for personalized adaptation to their distinct teaching styles. The results also show that although there is a difference in the teaching styles between expert and non-expert users, the style does not have an effect on the performance of the continual learning robot. Finally, our analysis shows that the constrained experimental setups that have been widely used to test most continual learning techniques are not adequate, as real users interact with and teach continual learning robots in a variety of ways.},
}
@inproceedings{Bagchi2023,
  author     = {Bagchi, Shelly and Holthaus, Patrick and Beraldo, Gloria and Senft, Emmanuel and Hernández García, Daniel and Han, Zhao and Jayaraman, Suresh Kumaar and Rossi, Alessandra and Esterwood, Connor and Andriella, Antonio and Pridham, Paul},
  title      = {{Towards Improved Replicability of Human Studies in Human-Robot Interaction: Recommendations for Formalized Reporting}},
  booktitle  = {International Conference on Human-Robot Interaction (HRI 2023)},
  year       = {2023},
  pages      = {629--633},
  address    = {Stockholm, Sweden},
  publisher  = {ACM/IEEE},
  doi        = {10.1145/3568294.3580162},
  abstract   = {In this paper, we present a proposed format for reporting human studies in Human-Robot Interaction (HRI). We specifically call out details which are often overlooked or left out of conference and journal papers due to space constraints, and propose a standardized format to contain those details in paper appendices. We expect that providing a formalized study reporting method will promote an increase in replicability and reproducibility of HRI studies, and encourage meta-analysis and review, ultimately increasing the generalizability and validity of HRI research. We consider our draft the first step towards these goals, and we release it to solicit feedback from the HRI community on the included topics.},
  annotation = {best paper candidate},
}
@inproceedings{Bamorovat2023,
  author    = {Bamorovat Abadi, Mohammad Hossein and Shahabian Alashti, Mohamad Reza and Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid},
  title     = {{RHM: Robot House Multi-view Human Activity Recognition Dataset}},
  booktitle = {The Sixteenth International Conference on Advances in Computer-Human Interactions (ACHI 2023)},
  year      = {2023},
  pages     = {159--166},
  address   = {Venice, Italy},
  publisher = {IARIA},
  isbn      = {978-1-68558-078-0},
  url       = {https://www.thinkmind.org/index.php?view=article&articleid=achi_2023_4_160_20077},
  abstract  = {With the recent increased development of deep neural networks and dataset capabilities, the Human Action Recognition (HAR) domain is growing rapidly in terms of both the available datasets and deep models. Despite this, there are some lacks of datasets specifically covering the Robotics field and Human-Robot interaction. We prepare and introduce a new multi-view dataset to address this. The Robot House Multi-View (RHM) dataset contains four views: Front, Back, Ceiling (Omni), and robot-views. There are 14 classes with 6701 video clips for each view, making a total of 26804 video clips for the four views. The lengths of the video clips are between 1 to 5 seconds. The videos with the same number and the same classes are synchronized in different views. In the second part of this paper, we consider how single streams afford activity recognition using established state-of-the-art models. We then assess the affordance for each view based on information theoretic modelling and mutual information concept. Furthermore, we benchmark the performance of different views, thus establishing the strengths and weaknesses of each view relevant to their information content and performance of the benchmark. Our results lead us to conclude that multi-view and multi-stream activity recognition has the added potential to improve activity recognition results. The RHM dataset is available at https://robothouse-dev.herts.ac.uk/datasets/RHM/HAR-1/},
}
@incollection{Fujii2023,
  author    = {Fujii, Koyo and Holthaus, Patrick and Samani, Hooman and Premachandra, Chinthaka and Amirabdollahian, Farshid},
  editor    = {Ali, Abdulaziz Al and Cabibihan, John-John and Meskin, Nader and Rossi, Silvia and Jiang, Wanyue and He, Hongsheng and Ge, Shuzhi Sam},
  title     = {{Two-Level Reinforcement Learning Framework for Self-Sustained Personal Robots}},
  booktitle = {International Conference on Social Robotics (ICSR 2023)},
  series    = {Lecture Notes in Computer Science},
  volume    = {14453},
  year      = {2023},
  pages     = {363--372},
  address   = {Doha, Qatar},
  publisher = {Springer Singapore},
  isbn      = {978-981-99-8715-3},
  doi       = {10.1007/978-981-99-8715-3_30},
  abstract  = {As social robots become integral to daily life, effective battery management and personalized user interactions are crucial. We employed Q-learning with the Miro-E robot for balancing self-sustained energy management and personalized user engagement. Based on our approach, we anticipate that the robot will learn when to approach the charging dock and adapt interactions according to individual user preferences. For energy management, the robot underwent iterative training in a simulated environment, where it could opt to either "play" or "go to the charging dock". The robot thereby adapts its interaction style to a specific individual, learning which of three actions would be preferred based on feedback it would receive during real-world human-robot interactions. From an initial analysis, we identified a specific point at which the Q values are inverted, indicating the robot's potential establishment of a battery threshold that triggers its decision to head to the charging dock in the energy management scenario. Moreover, by monitoring the probability of the robot selecting specific behaviours during human-robot interactions over time, we expect to gather evidence that the robot can successfully tailor its interactions to individual users in the realm of personalized engagement.},
}
@inproceedings{Foerster2023b,
  author    = {Förster, Frank and Romeo, Marta and Holthaus, Patrick and Nesset, Birthe and Galvez Trigo, Maria J. and Dondrup, Christian and Fischer, Joel E.},
  title     = {{Working with Troubles and Failures in Conversation Between Humans and Robots}},
  booktitle = {Conversational User Interfaces (CUI)},
  year      = {2023},
  address   = {Eindhoven, Netherlands},
  publisher = {ACM},
  doi       = {10.1145/3571884.3597437},
  abstract  = {In order to carry out human-robot collaborative tasks efficiently, robots have to be able to communicate with their human counterparts. In many applications, speech interfaces are deployed as a way to empower robots with the ability to communicate. Despite the progress made in speech recognition and (multi-modal) dialogue systems, such interfaces continue to be brittle in a number of ways and the experience of the failure of such interfaces is commonplace amongst roboticists. Surprisingly, a rigorous and complete analysis of communicative failures is still missing, and the technical literature is positively skewed towards the success and good performance of speech interfaces. In order to address this blind spot and investigate failures in conversations between humans and robots, an interdisciplinary effort is necessary. This workshop aims to raise awareness of said blind spot and provide a platform for discussing communicative troubles and failures in human-robot interactions and potentially related failures in non-robotic speech interfaces. We aim to bring together researchers studying communication in different fields, to start a scrupulous investigation into communicative failures, to begin working on a taxonomy of such failures, and enable a preliminary discussion on possible mitigating strategies. This workshop intends to be a venue where participants can freely discuss the failures they have encountered, to positively and constructively learn from them.},
}
@inproceedings{Holthaus2023,
  author     = {Holthaus, Patrick and Schulz, Trenton and Lakatos, Gabriella and Soma, Rebekka},
  title      = {{Communicative Robot Signals: Presenting a New Typology for Human-Robot Interaction}},
  booktitle  = {International Conference on Human-Robot Interaction (HRI 2023)},
  year       = {2023},
  pages      = {132--141},
  address    = {Stockholm, Sweden},
  publisher  = {ACM/IEEE},
  doi        = {10.1145/3568162.3578631},
  abstract   = {We present a new typology for classifying signals from robots when they communicate with humans. For inspiration, we use ethology, the study of animal behaviour and previous efforts from literature as guides in defining the typology. The typology is based on communicative signals that consist of five properties: the origin where the signal comes from, the deliberateness of the signal, the signal's reference, the genuineness of the signal, and its clarity (i.e. how implicit or explicit it is). Using the accompanying worksheet, the typology is straightforward to use to examine communicative signals from previous human-robot interactions and provides guidance for designers to use the typology when designing new robot behaviours.},
  annotation = {best paper candidate},
}
@inproceedings{Lakatos2023,
  author    = {Lakatos, Gabriella and Sarda Gou, Marina and Holthaus, Patrick and Wood, Luke and Moros, Sílvia and Litchfield, Vicky and Robins, Ben and Amirabdollahian, Farshid},
  title     = {{A feasibility study of using Kaspar, a humanoid robot for speech and language therapy for children with learning disabilities}},
  booktitle = {International Conference on Robot and Human Interactive Communication (RO-MAN 2023)},
  year      = {2023},
  pages     = {1233--1238},
  address   = {Busan, Korea},
  publisher = {IEEE},
  doi       = {10.1109/RO-MAN57019.2023.10309615},
  abstract  = {The research presented in this paper investigates the feasibility of using humanoid robots like Kaspar as assistive tools in Speech, Language and Communication (SLC) therapy for children with learning disabilities. The study aims to answer two research questions: RQ1. Can a social robot be used to improve SLC skills of children with learning disabilities? RQ2. What is the measurable impact of interacting with a humanoid robot on children with learning disability and SLC needs? A co-creation approach was followed, three therapeutic educational games were developed and implemented on the Kaspar robot in collaboration with experienced SLC experts. Twenty children from two different special educational needs schools participated in the games in 9 sessions over a period of 3 weeks. Results showed significant improvement in participants' SLC skills -- i.e. language comprehension and production skills -- over the intervention. Findings of this research affirms feasibility, suggesting that this type of robotic interaction is the right path to follow to help the children improve their SLC skills.},
}
@inproceedings{SardaGou2023,
author = "Sarda Gou, Marina and Lakatos, Gabriella and Holthaus, Patrick and Robins, Ben and Moros, Sílvia and Wood, Luke and Araujo, Hugo and deGraft-Hanson, Christine Augusta Ekua and Mousavi, Mohammad Reza and Amirabdollahian, Farshid",
title = "{Kaspar Explains: The Effect of Causal Explanations on Visual Perspective Taking Skills in Children with Autism Spectrum Disorder}",
booktitle = "International Conference on Robot and Human Interactive Communication (RO-MAN 2023)",
abstract = "This paper presents an investigation into the effectiveness of introducing explicit causal explanations in a child-robot interaction setting to help children with autism improve their Visual Perspective Taking (VPT) skills. A sample of ten children participated in three sessions with a social robot on different days, during which they played several games consisting of VPT tasks. In some of the sessions, the robot provided constructive feedback to the children by giving causal explanations related to VPT; other sessions were control sessions without explanations. An analysis of the children's learning progress revealed that they improved their VPT abilities faster when the robot provided causal explanations. However, both groups ultimately reach a similar ratio of correct answers in later sessions. These findings suggest that providing causal explanations using a social robot can be effective to teach VPT to children with autism. This study paves the way for further exploring a robot's ability to provide causal explanations in other educational scenarios.",
address = "Busan, Korea",
year = "2023",
pages = "1407--1412",
doi = "10.1109/RO-MAN57019.2023.10309464",
publisher = "IEEE"
}
@inproceedings{Shahabian2023b,
author = "Shahabian Alashti, Mohamad Reza and Bamorovat Abadi, Mohammad Hossein and Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid",
title = "{Lightweight human activity recognition for ambient assisted living}",
booktitle = "The Sixteenth International Conference on Advances in Computer-Human Interactions (ACHI 2023)",
address = "Venice, Italy",
year = "2023",
publisher = "IARIA",
abstract = "Ambient Assisted Living (AAL) systems aim to improve the safety, comfort, and quality of life for the populations with specific attention given to prolonging personal independence during later stages of life. Human Activity Recognition (HAR) plays a crucial role in enabling AAL systems to recognise and understand human actions. Multi-view human activity recognition (MV-HAR) techniques are particularly useful for AAL systems as they can use information from multiple sensors to capture different perspectives of human activities and can help to improve the robustness and accuracy of activity recognition. In this work, we propose a lightweight activity recognition pipeline that utilizes skeleton data from multiple perspectives with the objective of enhancing an assistive robot's perception of human activity. The pipeline includes data sampling, spatial temporal data transformation, and representation and classification methods. This work contrasts a modified classic LeNet classification model (M-LeNet) versus a Vision Transformer (ViT) in detecting and classifying human activities. Both methods are evaluated using a multi-perspective dataset of human activities in the home (RHM-HAR-SK). Our results indicate that combining camera views can improve recognition accuracy. Furthermore, our pipeline provides an efficient and scalable solution in the AAL context, where bandwidth and computing resources are often limited.",
isbn = "978-1-68558-078-0",
pages = "188--193",
url = "https://www.thinkmind.org/index.php?view=article\&articleid=achi\_2023\_4\_200\_20092"
}
@inproceedings{Shahabian2023a,
author = "Shahabian Alashti, Mohamad Reza and Bamorovat Abadi, Mohammad Hossein and Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid",
title = "{RHM-HAR-SK: A Multiview Dataset with Skeleton Data for Ambient Assisted Living Research}",
booktitle = "The Sixteenth International Conference on Advances in Computer-Human Interactions (ACHI 2023)",
address = "Venice, Italy",
year = "2023",
publisher = "IARIA",
pages = "181--187",
isbn = "978-1-68558-078-0",
url = "https://www.thinkmind.org/index.php?view=article\&articleid=achi\_2023\_4\_190\_20087",
abstract = "Human and activity detection has always been a vital task in Human-Robot Interaction (HRI) scenarios, such as those involving assistive robots. In particular, skeleton-based Human Activity Recognition (HAR) offers a robust and effective detection method based on human biomechanics. Recent advancements in human pose estimation have made it possible to extract skeleton positioning data accurately and quickly using affordable cameras. In interaction with a human, robots can therefore capture detailed information from a close distance and flexible perspective. However, recognition accuracy is susceptible to robot movements, where the robot often fails to capture the entire scene. To address this we propose the adoption of external cameras to improve the accuracy of activity recognition on a mobile robot. In support of this proposal, we present the dataset RHM-HAR-SK that combines multiple camera perspectives augmented with human skeleton extraction obtained by the HRNet pose estimation. We apply qualitative and quantitative analysis to the extracted skeleton and its joints to evaluate the coverage of extracted poses per camera perspective and activity. Results indicate that the recognition accuracy for the skeleton varies between camera perspectives and also joints, depending on the type of activity. The RHM-HAR-SK dataset is available at https://robothouse-dev.herts.ac.uk/datasets/RHM/HAR-SK"
}
@incollection{Araujo2022,
author = "Araujo, Hugo and Holthaus, Patrick and Sarda Gou, Marina and Lakatos, Gabriella and Galizia, Giulia and Wood, Luke and Robins, Ben and Mousavi, Mohammad Reza and Amirabdollahian, Farshid",
editor = "Cavallo, Filippo and Cabibihan, John-John and Fiorini, Laura and Sorrentino, Alessandra and He, Hongsheng and Liu, Xiaorui and Matsumoto, Yoshio and Ge, Shuzhi Sam",
title = "{Kaspar Causally Explains: Causal Explanation in an Assistive Robot for Children with Autism Spectrum Disorder}",
booktitle = "International Conference on Social Robotics (ICSR 2022)",
series = "Lecture Notes in Computer Science",
volume = "13818",
address = "Florence, Italy",
year = "2022",
publisher = "Springer Cham",
pages = "85--99",
doi = "10.1007/978-3-031-24670-8\_9",
isbn = "978-3-031-24670-8",
abstract = "The Kaspar robot has been used with great success to work as an education and social mediator with children with autism spectrum disorder. Enabling the robot to automatically generate causal explanations is considered a key to enrich the interaction scenarios for children and thereby promote additional trust in the robot. To this end, we present a formal theory of causal explanation to be embedded in Kaspar. Based on this theory, we build a causal model and an efficient causal analysis method to calculate causal explanations. We implement our method using Java with inputs provided by a human operator. This model automatically generates the causal explanation that are then spoken by Kaspar. We validate our explanations for user satisfaction in an empirical evaluation."
}
@incollection{Riches2022,
author = "Riches, Lewis and Koay, Kheng Lee and Holthaus, Patrick",
editor = "Cavallo, Filippo and Cabibihan, John-John and Fiorini, Laura and Sorrentino, Alessandra and He, Hongsheng and Liu, Xiaorui and Matsumoto, Yoshio and Ge, Shuzhi Sam",
title = "{Classification of personal data used by personalised robot companions based on concern of exposure}",
booktitle = "International Conference on Social Robotics (ICSR 2022)",
series = "Lecture Notes in Computer Science",
volume = "13817",
address = "Florence, Italy",
year = "2022",
publisher = "Springer Cham",
pages = "228--237",
doi = "10.1007/978-3-031-24667-8\_21",
isbn = "978-3-031-24667-8",
abstract = "We present a paper looking at the accidental exposure of personal data by personalised companion robots in human-robot interaction. Due to the need for personal data, personalisation brings inherent risk of accidental personal data exposure through multi-modal communication. An online questionnaire was conducted to collect perceptions on the level of concern of personal data being exposed. The personal data examined in this paper has been used to personalise a companion robot along with links to the UK general data protection act. The level of concern for these personal data has been classified into high, medium and low concern with guidelines provided on how these different classifications should be handled by a robot. Evidence has also been found that age, gender, extroversion and conscientiousness influence a person's perceptions on personal data exposure concern."
}
@inproceedings{SardaGou2022,
author = "Sarda Gou, Marina and Lakatos, Gabriella and Holthaus, Patrick and Wood, Luke and Mousavi, Mohammad Reza and Robins, Ben and Amirabdollahian, Farshid",
title = "{Towards understanding causality - a retrospective study of scenarios that trigger explanations in interactions between a humanoid robot and autistic children}",
booktitle = "International Conference on Robot and Human Interactive Communication (RO-MAN 2022)",
address = "Naples, Italy",
year = "2022",
publisher = "IEEE",
doi = "10.1109/RO-MAN53752.2022.9900660",
abstract = "Children with Autism Spectrum Disorder (ASD) often struggle with visual perspective taking (VPT) skills and the understanding that others might have viewpoints and perspectives that are different from their own; i.e. the ability to understand what another individual can and cannot see, or the ability to understand that two or more people looking at the same object from different positions might not see the same thing. This could be improved by introducing explicit causal explanations in the interactions involving autistic children. The use of social robots can help autistic children improve their social skills and the robot Kaspar, used in this study, is a humanoid robot specifically designed to interact with children with ASD. The aim of this retrospective study is to define the initial protocol of a study on the effect of causal explanation provided by Kaspar on VPT. To this end, we investigate in which categories of behaviors or scenarios, causal explanations either by researchers or by Kaspar play a more significant role. The results have helped us identify multiple interaction categories that benefit from causal explanation, mostly involving VPT. We now use these results in order to create and validate behaviors and interactions that can help children with autism better understand VPT by exploiting causality."
}
@inproceedings{BamorovatAbadi2021a,
author = "Bamorovat Abadi, Mohammad Hossein and Shahabian Alashti, Mohamad Reza and Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid",
title = "Affordable Robot Mapping using Omnidirectional Vision",
booktitle = "Proceedings of the 4th UK-RAS Conference: Robotics at Home (\#UKRAS21)",
address = "Hatfield, UK",
year = "2021",
abstract = "Mapping is a fundamental requirement for robot navigation. In this paper, we introduce a novel visual mapping method that relies solely on a single omnidirectional camera. We present a metric that allows us to generate a map from the input image by using a visual Sonar approach. The combination of the visual sonars with the robot's odometry enables us to determine a relation equation and subsequently generate a map that is suitable for robot navigation. Results based on visual map comparison indicate that our approach is comparable with the established solutions based on RGB-D cameras or laser-based sensors. We now embark on evaluating our accuracy against the established methods.",
pages = "29--30",
doi = "10.31256/If7Nm5Z"
}
@inproceedings{BamorovatAbadi2021b,
author = "Bamorovat Abadi, Mohammad Hossein and Shahabian Alashti, Mohamad Reza and Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid",
title = "Robot House Human Activity Recognition Dataset",
booktitle = "Proceedings of the 4th UK-RAS Conference: Robotics at Home (\#UKRAS21)",
address = "Hatfield, UK",
year = "2021",
abstract = "Human activity recognition is one of the most challenging tasks in computer vision. State-of-the art approaches such as deep learning techniques thereby often rely on large labelled datasets of human activities. However, currently available datasets are suboptimal for learning human activities in companion robotics scenarios at home, for example, missing crucial perspectives. With this as a consideration, we present the University of Hertfordshire Robot House Human Activity Recognition Dataset (RH-HAR-1). It contains RGB videos of a human engaging in daily activities, taken from four different cameras. Importantly, this dataset contains two non-standard perspectives: a ceiling-mounted fisheye camera and a mobile robot's view. In the first instance, RH-HAR-1 covers five daily activities with a total of more than 10,000 videos.",
pages = "19--20",
doi = "10.31256/Bw7Kt2N"
}
@inproceedings{ShahabianAlashti2021,
author = "Shahabian Alashti, Mohamad Reza and Bamorovat Abadi, Mohammad Hossein and Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid",
title = "Human activity recognition in RoboCup@home: Inspiration from online benchmarks",
booktitle = "Proceedings of the 4th UK-RAS Conference: Robotics at Home (\#UKRAS21)",
address = "Hatfield, UK",
year = "2021",
abstract = "Human activity recognition is an important aspect of many robotics applications. In this paper, we discuss how well the RoboCup@home competition accounts for the importance of such recognition algorithms. Using public benchmarks as an inspiration, we propose to add a new task that specifically tests the performance of human activity recognition in this league. We suggest that human-robot interaction research in general can benefit from the addition of such a task as RoboCup@home is considered to accelerate, regulate, and consolidate the field.",
pages = "27--28",
doi = "10.31256/Os6Aw4Y"
}
@incollection{Rossi2020,
author = "Rossi, Alessandra and Dautenhahn, Kerstin and Koay, Kheng Lee and Walters, Michael L. and Holthaus, Patrick",
editor = "Wagner, Alan R. and Feil-Seifer, David and Haring, Kerstin S. and Rossi, Silvia and Williams, Thomas and He, Hongsheng and Ge, Shuzhi Sam",
title = "{Evaluating people's perceptions of trust in a robot in a repeated interactions study}",
booktitle = "International Conference on Social Robotics (ICSR 2020)",
series = "Lecture Notes in Computer Science",
volume = "12483",
address = "Golden, Colorado",
year = "2020",
publisher = "Springer Cham",
doi = "10.1007/978-3-030-62056-1\_38",
isbn = "978-3-030-62056-1",
abstract = "Trust has been established to be a key factor in fostering human-robot interactions. However, trust can change overtime according different factors, including a breach of trust due to a robot's error. In this study, we observed people's interactions with a companion robot in a real house adapted for human-robot interaction experimentation over three weeks. The interactions happened in six day-scenarios in which a robot performed different tasks under two different conditions. Each condition included fourteen tasks performed by the robot, either correctly, or with errors with severe consequences on the first or last day of interaction. At the end of each experimental condition, participants were presented with an emergency scenario to evaluate their trust in the robot. We evaluated participants' trust in the robot by observing their decision to trust the robot during the emergency scenario, and by collecting their consideration through questionnaires. We concluded that there is a correlation between the timing of an error with severe consequences performed by the robot and the corresponding loss of trust of the human in the robot. In particular, people's trust is subjected to the initial mental formation."
}
@incollection{Holthaus2019,
author = "Holthaus, Patrick and Menon, Catherine and Amirabdollahian, Farshid",
editor = "Salichs, Miguel A. and Ge, Shuzhi Sam and Barakova, Emilia Ivanova and Cabibihan, John-John and Wagner, Alan R. and Castro-Gonz{\'a}lez, {\'A}lvaro and He, Hongsheng",
title = "{How a Robot's Social Credibility Affects Safety Performance}",
booktitle = "International Conference on Social Robotics (ICSR 2019)",
series = "Lecture Notes in Computer Science",
volume = "11876",
address = "Madrid, Spain",
year = "2019",
publisher = "Springer Cham",
pages = "740--749",
doi = "10.1007/978-3-030-35888-4\_69",
isbn = "978-3-030-35888-4",
abstract = "This paper connects the two domains of HRI (Human-Robot Interaction) and safety engineering to ensure that the design of interactive robots considers an effect of social behaviours on safety functionality. We conducted a preliminary user study with a social robot that alerts participants during a puzzle-solving task to an environmental hazard. Our study findings show an indicative trend that users who were interrupted by a socially credible robot are more likely to act and mitigate the hazard than users interrupted by a robot lacking social credibility."
}
@inproceedings{Menon2019a,
author = "Menon, Catherine and Holthaus, Patrick",
title = "{Does a Loss of Social Credibility Impact Robot Safety? Balancing social and safety behaviours of assistive robots}",
booktitle = "International Conference on Performance, Safety and Robustness in Complex Systems and Applications (PESARO 2019)",
address = "Valencia, Spain",
year = "2019",
pages = "18--24",
publisher = "IARIA",
abstract = "This position paper discusses the safety-related functions performed by assistive robots and explores the relationship between trust and effective safety risk mitigation. We identify a measure of the robot’s social effectiveness, termed social credibility, and present a discussion of how social credibility may be gained and lost. This paper’s contribution is the identification of a link between social credibility and safety-related performance. Accordingly, we draw on analyses of existing systems to demonstrate how an assistive robot’s safety-critical functionality can be impaired by a loss of social credibility. In addition, we present a discussion of some of the consequences of prioritising either safety-related functionality or social engagement. We propose the identification of a mixed-criticality scheduling algorithm in order to maximise both safety-related performance and social engagement.",
isbn = "978-1-61208-698-9",
url = "https://www.thinkmind.org/index.php?view=article\&articleid=pesaro\_2019\_2\_10\_60021",
annotation = "best paper"
}
@inproceedings{Schulz2019b,
author = "Schulz, Trenton and Holthaus, Patrick and Amirabdollahian, Farshid and Koay, Kheng Lee",
title = "{Humans' Perception of a Robot Moving Using a Slow in and Slow Out Velocity Profile}",
booktitle = "International Conference on Human-Robot Interaction (HRI 2019)",
address = "Daegu, South Korea",
year = "2019",
pages = "594--595",
publisher = "ACM/IEEE",
abstract = "Humans need to understand and trust the robots they are working with. We hypothesize that how a robot moves can impact people's perception and their trust. We present a methodology for a study to explore people’s perception of a robot using the animation principle of slow in, slow out—to change the robot’s velocity profile versus a robot moving using a linear velocity profile. Study participants will interact with the robot within a home context to complete a task while the robot moves around the house. The participants’ perceptions of the robot will be recorded using the Godspeed Questionnaire. A pilot study shows that it is possible to notice the difference between the linear and the slow in, slow out velocity profiles, so the full experiment planned with participants will allow us to compare their perceptions based on the two observable behaviors.",
doi = "10.1109/HRI.2019.8673239"
}
@inproceedings{Schulz2019a,
author = "Schulz, Trenton and Holthaus, Patrick and Amirabdollahian, Farshid and Koay, Kheng Lee and Torresen, Jim and Herstad, Jo",
title = "{Differences of Human Perceptions of a Robot Moving using Linear or Slow in, Slow out Velocity Profiles When Performing a Cleaning Task}",
booktitle = "International Conference on Robot and Human Interactive Communication (RO-MAN 2019)",
address = "New Delhi, India",
year = "2019",
publisher = "IEEE",
doi = "10.1109/RO-MAN46459.2019.8956355",
issn = "1944-9445",
url = "https://www.dropbox.com/sh/6ok7yp1ouhmhoji/AAC60Yp7l9Ol\_hy8ia5LMOExa/papers?dl=0\&subfolder\_nav\_tracking=1",
abstract = "We investigated how a robot moving with different velocity profiles affects a person's perception of it when working together on a task. The two profiles are the standard linear profile and a profile based on the animation principles of slow in, slow out. The investigation was accomplished by running an experiment in a home context where people and the robot cooperated on a clean-up task. We used the Godspeed series of questionnaires to gather people’s perception of the robot. Average scores for each series appear not to be different enough to reject the null hypotheses, but looking at the component items provides paths to future areas of research. We also discuss the scenario for the experiment and how it may be used for future research into using animation techniques for moving robots and improving the legibility of a robot’s locomotion."
}
@inproceedings{Rossi2018a,
author = "Rossi, Alessandra and Holthaus, Patrick and Dautenhahn, Kerstin and Koay, Kheng Lee and Walters, Michael L.",
title = "{Getting to know Pepper: Effects of people's awareness of a robot's capabilities on their trust in the robot}",
booktitle = "International Conference on Human-Agent Interaction (HAI 2018)",
address = "Southampton, UK",
year = "2018",
publisher = "ACM",
pages = "246--252",
doi = "10.1145/3284432.3284464",
abstract = "This work investigates how human awareness about a social robot's capabilities is related to trusting this robot to handle different tasks. We present a user study that relates knowledge on different quality levels to participant's ratings of trust. Secondary school pupils were asked to rate their trust in the robot after three types of exposures: a video demonstration, a live interaction, and a programming task. The study revealed that the pupils' trust is positively affected across different domains after each session, indicating that human users trust a robot more the more awareness about the robot they have."
}
@inproceedings{Michalski2017,
author = "Michalski, Timo and Pohling, Marian and Holthaus, Patrick",
title = "{Competitive Agents for Intelligent Home Automation}",
booktitle = "International Conference on Human-Agent Interaction (HAI 2017)",
address = "Bielefeld, Germany",
year = "2017",
publisher = "ACM",
pages = "527--531",
doi = "10.1145/3125739.3132616",
abstract = "Technologies that aim to achieve intelligent automation in smart homes typically involve either trigger-action pairs or machine learning. These, however, are often complex to configure or hard to comprehend for the user. To maximize automation efficiency while keeping the configuration simple and the effects comprehensible, we thus explore an alternative agent-based approach. With the help of a survey, we put together a set of intelligent agents that act autonomously in the environment. Conflicts between behaviors, identified with a secondary study, are thereby resolved with a competitive combination of agents. We finally present the draft of a user interface that allows for individual configuration of all agents."
}
@incollection{Bernotat2016,
author = "Bernotat, Jasmin and Schiffhauer, Birte and Eyssel, Friederike and Holthaus, Patrick and Leichsenring, Christian and Richter, Viktor and Pohling, Marian and Carlmeyer, Birte and Engelmann, Kai Frederic and Lier, Florian and Schulz, Simon and Bröhl, Rebecca and Seibel, Elena and Hellwig, Paul and Cimiano, Philipp and Kummert, Franz and Schlangen, David and Wagner, Petra and Hermann, Thomas and Wachsmuth, Sven and Wrede, Britta and Wrede, Sebastian",
editor = "Agah, Arvin and Cabibihan, John-John and Howard, Ayanna M. and Salichs, Miguel A. and He, Hongsheng",
title = "{Welcome to the future - How na{\"{i}}ve users intuitively address an intelligent robotics apartment}",
booktitle = "International Conference on Social Robotics (ICSR 2016)",
address = "Kansas City, USA",
series = "Lecture Notes in Computer Science",
pages = "982--992",
volume = "9979",
year = "2016",
doi = "10.1007/978-3-319-47437-3\_96",
isbn = "978-3-319-47437-3",
publisher = "Springer Berlin / Heidelberg",
abstract = "The purpose of this Wizard-of-Oz study was to explore the intuitive verbal and non-verbal goal-directed behavior of na{\"{i}}ve participants in an intelligent robotics apartment. Participants had to complete seven mundane tasks, for instance, they were asked to turn on the light. Participants were explicitly instructed to consider nonstandard ways of completing the respective tasks. A multi-method approach revealed that most participants favored speech and interfaces like switches and screens to communicate with the intelligent robotics apartment. However, they required instructions to use the interfaces in order to perceive them as competent targets for human-machine interaction. Hence, first important steps were taken to investigate how to design an intelligent robotics apartment in a user-centered and user-friendly manner."
}
@inproceedings{Engelmann2016,
author = "Engelmann, Kai Frederic and Holthaus, Patrick and Wrede, Sebastian and Wrede, Britta",
title = "{An Interaction-Centric Dataset for Learning Automation Rules in Smart Homes}",
booktitle = "International Conference on Language Resources and Evaluation (LREC 2016)",
address = "Portorož, Slovenia",
year = "2016",
publisher = "European Language Resources Association (ELRA)",
abstract = "The term smart home refers to a living environment that by its connected sensors and actuators is capable of providing intelligent and contextualised support to its user. This may result in automated behaviors that blends into the user’s daily life. However, currently most smart homes do not provide such intelligent support. A first step towards such intelligent capabilities lies in learning automation rules by observing the user’s behavior. We present a new type of corpus for learning such rules from user behavior as observed from the events in a smart homes sensor and actuator network. The data contains information about intended tasks by the users and synchronized events from this network. It is derived from interactions of 59 users with the smart home in order to solve five tasks. The corpus contains recordings of more than 40 different types of data streams and has been segmented and pre-processed to increase signal quality. Overall, the data shows a high noise level on specific data types that can be filtered out by a simple smoothing approach. The resulting data provides insights into event patterns resulting from task specific user behavior and thus constitutes a basis for machine learning approaches to learn automation rules.",
isbn = "978-2-9517408-9-1",
url = "http://www.lrec-conf.org/proceedings/lrec2016/summaries/1014.html"
}
@inproceedings{Holthaus2016b,
author = "Holthaus, Patrick and Hermann, Thomas and Wrede, Sebastian and Wachsmuth, Sven and Wrede, Britta",
title = "{1st International Workshop on Embodied Interaction with Smart Environments (Workshop Summary)}",
booktitle = "International Conference on Multimodal Interaction (ICMI 2016)",
address = "Tokyo, Japan",
year = "2016",
publisher = "ACM",
abstract = "The first workshop on embodied interaction with smart environments aims to bring together the very active community of multi-modal interaction research and the rapidly evolving field of smart home technologies. Besides addressing the software architecture of such very complex systems, it puts an emphasis on questions regarding an intuitive interaction with the environment. Thereby, especially the role of agency leads to interesting challenges in the light of user interactions. We therefore encourage a lively discussion on the design and concepts of social robots and virtual avatars as well as innovative ambient devices and their implementation into smart environments.",
doi = "10.1145/2993148.3007628"
}
@inproceedings{Holthaus2016a,
author = "Holthaus, Patrick and Leichsenring, Christian and Bernotat, Jasmin and Richter, Viktor and Pohling, Marian and Carlmeyer, Birte and Köster, Norman and {Meyer zu Borgsen}, Sebastian and Zorn, René and Schiffhauer, Birte and Engelmann, Kai Frederic and Lier, Florian and Schulz, Simon and Cimiano, Philipp and Eyssel, Friederike and Hermann, Thomas and Kummert, Franz and Schlangen, David and Wachsmuth, Sven and Wagner, Petra and Wrede, Britta and Wrede, Sebastian",
title = "{How to Address Smart Homes with a Social Robot? A Multi-modal Corpus of User Interactions with an Intelligent Environment}",
booktitle = "International Conference on Language Resources and Evaluation (LREC 2016)",
address = "Portorož, Slovenia",
year = "2016",
publisher = "European Language Resources Association (ELRA)",
isbn = "978-2-9517408-9-1",
url = "http://www.lrec-conf.org/proceedings/lrec2016/summaries/1046.html",
abstract = "In order to explore intuitive verbal and non-verbal interfaces in smart environments we recorded user interactions with an intelligent apartment. Besides offering various interactive capabilities itself, the apartment is also inhabited by a social robot that is available as a humanoid interface. This paper presents a multi-modal corpus that contains goal-directed actions of naive users in attempts to solve a number of predefined tasks. Alongside audio and video recordings, our data-set consists of large amount of temporally aligned sensory data and system behavior provided by the environment and its interactive components. Non-verbal system responses such as changes in light or display contents, as well as robot and apartment utterances and gestures serve as a rich basis for later in-depth analysis. Manual annotations provide further information about meta data like the current course of study and user behavior including the incorporated modality, all literal utterances, language features, emotional expressions, foci of attention, and addressees."
}
@inproceedings{Holthaus2014a,
author = "Holthaus, Patrick and Wachsmuth, Sven",
title = "{The Receptionist Robot}",
booktitle = "International Conference on Human-Robot Interaction (HRI 2014)",
address = "Bielefeld, Germany",
year = "2014",
pages = "329",
publisher = "ACM/IEEE",
doi = "10.1145/2559636.2559784",
abstract = "In this demonstration, a humanoid robot interacts with an interlocutor through speech and gestures in order to give directions on a map. The interaction is specifically designed to provide an enhanced user experience by being aware of non-verbal social signals. Therefore, we take spatial communicative cues into account and to react to them accordingly."
}
@inproceedings{Holthaus2013,
author = "Holthaus, Patrick and Wachsmuth, Sven",
title = "{Direct On-Line Imitation of Human Faces with Hierarchical ART Networks}",
booktitle = "International Symposium on Robot and Human Interactive Communication (RO-MAN 2013)",
address = "Gyeongju, South Korea",
year = "2013",
publisher = "IEEE",
pages = "370--371",
doi = "10.1109/ROMAN.2013.6628502",
abstract = "This work-in-progress paper presents an on-line system for robotic heads capable of mimicking humans. The marker-less method solely depends on the interactant's face as an input and does not use a set of basic emotions and is thus capable of displaying a large variety of facial expressions. A preliminary evaluation assigns solid performance with potential for improvement."
}
@inproceedings{Holthaus2012,
author = "Holthaus, Patrick and Wachsmuth, Sven",
title = "{Active Peripersonal Space for More Intuitive HRI}",
booktitle = "International Conference on Humanoid Robots (HUMANOIDS 2012)",
address = "Osaka, Japan",
pages = "508--513",
year = "2012",
publisher = "IEEE-RAS",
doi = "10.1109/HUMANOIDS.2012.6651567",
abstract = "In face-to-face interaction, humans coordinate actions in their surroundings with the help of a well structured spatial representation. For example on a dinner table, everybody exactly knows which objects belong to her and where she is allowed to grasp. To have robots, e.g. receptionists, act accordingly, we conducted an on-line survey about the expectations humans have while interacting with such a robot. Results indicate that humans attribute the robot handedness and an awareness of distance and territoriality in its own peripersonal space. In order to align a robot's behavior to these expectations, we have developed a first spatial representation of the robot's peripersonal space."
}
@inproceedings{Hegel2011,
author = "Hegel, Frank and Gieselmann, Sebastian and Peters, Annika and Holthaus, Patrick and Wrede, Britta",
title = "{Towards a Typology of Meaningful Signals and Cues in Social Robotics}",
booktitle = "International Symposium on Robot and Human Interactive Communication (RO-MAN 2011)",
address = "Atlanta, Georgia, USA",
year = "2011",
pages = "72--78",
publisher = "IEEE",
doi = "10.1109/ROMAN.2011.6005246",
abstract = "In this paper, we present a first step towards a typology of relevant signals and cues in human-robot interaction (HRI). In human as well as in animal communication systems, signals and cues play an important role for senders and receivers of such signs. In our typology, we systematically distinguish between a robot's signals and cues which are either designed to be human-like or artificial to create meaningful information. Subsequently, developers and designers should be aware of which signs affect a user's judgements on social robots. For this reason, we first review several signals and cues that have already been successfully used in HRI with regard to our typology. Second, we discuss crucial human-like and artificial cues which have so far not been considered in the design of social robots - although they are highly likely to affect a user's judgement of social robots."
}
@incollection{Holthaus2010,
author = "Holthaus, Patrick and Lütkebohle, Ingo and Hanheide, Marc and Wachsmuth, Sven",
editor = "Ge, Shuzhi and Li, Haizhou and Cabibihan, John-John and Tan, Yeow Kee",
title = "{Can I Help You? A Spatial Attention System for a Receptionist Robot}",
booktitle = "International Conference on Social Robotics (ICSR 2010)",
address = "Singapore",
series = "Lecture Notes in Computer Science",
pages = "325--334",
volume = "6414",
year = "2010",
doi = "10.1007/978-3-642-17248-9_34",
isbn = "978-3-642-17247-2",
publisher = "Springer Berlin / Heidelberg",
abstract = "Social interaction between humans takes place in the spatial dimension on a daily basis. We occupy space for ourselves and respect the dynamics of spaces that are occupied by others. In human-robot interaction, the focus has been on other topics so far. Therefore, this work applies a spatial model to a humanoid robot and implements an attention system that is connected to it. The resulting behaviors have been verified in an on-line video study. The questionnaire revealed that these behaviors are applicable and result in a robot that has been perceived as more interested in the human and shows its attention and intentions to a higher degree.",
annotation = "best paper"
}
@inproceedings{Hough2024a,
author = "Hough, Julian and Baptista De Lima, Carlos and Förster, Frank and Holthaus, Patrick and Zheng, Yongjun",
title = "{FLUIDITY: Defining, measuring and improving fluidity in human-robot dialogue in virtual and real-world settings}",
booktitle = "SemDial 2024 - TrentoLogue (The 28th Workshop on the Semantics and Pragmatics of Dialogue)",
year = "2024",
url = "https://www.semdial.org/anthology/papers/Z/Z24/Z24-4044/"
}
@inproceedings{Menon2024,
author = "Menon, Catherine and Rainer, Austen and Holthaus, Patrick and Lakatos, Gabriella and Carta, Silvio",
title = "{EHAZOP: A Proof of Concept Ethical Hazard Analysis of an Assistive Robot}",
booktitle = "ICRA 2024 Workshop on Robot Ethics - WOROBET",
year = "2024",
abstract = "The use of assistive robots in domestic environments can raise significant ethical concerns, from the risk of individual ethical harm to wider societal ethical impacts including culture flattening and compromise of human dignity. It is therefore essential to ensure that technological development of these robots is informed by robust and inclusive techniques for mitigating ethical concerns. This paper presents EHAZOP, a method for conducting an ethical hazard analysis on an assistive robot. EHAZOP draws upon collaborative, creative and structured processes originating within safety engineering, using these to identify ethical concerns associated with the operation of a given assistive robot. We present the results of a proof of concept study of EHAZOP, demonstrating the potential for this process to identify diverse ethical hazards in these systems.",
url = "https://arxiv.org/abs/2406.09239",
doi = "10.48550/arXiv.2406.09239"
}
@inproceedings{Ayub2023d,
author = "Ayub, Ali and De Francesco, Zachary and Holthaus, Patrick and Nehaniv, Chrystopher L. and Dautenhahn, Kerstin",
title = "{Human Perceptions of Task Load and Trust when Interactively Teaching a Continual Learning Robot}",
booktitle = "CVPR 2023 Workshop on Continual Learning in Computer Vision (CLVision)",
address = "New Orleans, USA",
year = "2023",
abstract = "Although machine learning models for continual learning can mitigate forgetting on static, systematically collected datasets, it is unclear how human users might perceive a robot that continually learns over multiple interactions with them. In this paper, we developed a system that integrates CL models for object recognition with a Fetch mobile manipulator robot and allows humans to directly teach and test the robot over multiple sessions. We conducted an in-person between-subject study with two CL models and 40 participants that interacted with our system in 200 sessions (5 sessions per participant). Our results indicate that state-of-the-art CL models might perform unreliably when applied on robots interacting with human participants. Our results also suggest that participants' trust in a continual learning robot significantly decreases over multiple sessions if the robot forgets previously learned objects. However, the perceived task load on participants for teaching and testing the robot remains low for all sessions, indicating the feasibility of continual learning robots in the real world.",
url = "https://sites.google.com/view/clvision2023/call-for-papers/accepted-papers\#h.j870cl7za6q7"
}
@inproceedings{Helal2023,
author = "Helal, Manal and Holthaus, Patrick and Lakatos, Gabriella and Amirabdollahian, Farshid",
title = "{Chat Failures and Troubles: Reasons and Solutions}",
booktitle = "CUI 2023 Workshop on Working with Trouble and Failures in conversation between humans and robots - WTF",
year = "2023",
abstract = "This paper examines some common problems in Human-Robot Interaction (HRI) causing failures and troubles in Chat. A given use case's design decisions start with the suitable robot, the suitable chatting model, identifying common problems that cause failures, identifying potential solutions, and planning continuous improvement. In conclusion, it is recommended to use a closed-loop control algorithm that guides the use of trained Artificial Intelligence pre-trained models and provides vocabulary filtering, re-train batched models on new datasets, learn online from data streams, and/or use reinforcement learning models to self-update the trained models and reduce errors.",
doi = "10.48550/arXiv.2309.03708"
}
@inproceedings{Holthaus2023b,
author = "Holthaus, Patrick and Rossi, Alessandra",
title = "{Common (good) practices measuring trust in HRI}",
booktitle = "RO-MAN 2023 Workshop on Trust, Acceptance and Social Cues in Human-Robot Interaction - SCRITA",
year = "2023",
url = "https://arxiv.org/abs/2311.12182",
abstract = "Trust in robots is widely believed to be imperative for the adoption of robots into people's daily lives. It is, therefore, understandable that the literature of the last few decades focuses on measuring how much people trust robots -- and more generally, any agent - to foster such trust in these technologies. Researchers have been exploring how people trust robot in different ways, such as measuring trust on human-robot interactions (HRI) based on textual descriptions or images without any physical contact, during and after interacting with the technology. Nevertheless, trust is a complex behaviour, and it is affected and depends on several factors, including those related to the interacting agents (e.g. humans, robots, pets), itself (e.g. capabilities, reliability), the context (e.g. task), and the environment (e.g. public spaces vs private spaces vs working spaces). In general, most roboticists agree that insufficient levels of trust lead to a risk of disengagement while over-trust in technology can cause over-reliance and inherit dangers, for example, in emergency situations. It is, therefore, very important that the research community has access to reliable methods to measure people's trust in robots and technology. In this position paper, we outline current methods and their strengths, identify (some) weakly covered aspects and discuss the potential for covering a more comprehensive amount of factors influencing trust in HRI."
}
@inproceedings{Holthaus2021c,
author = "Holthaus, Patrick",
title = "{How does a robot's social credibility relate to its perceived trustworthiness?}",
booktitle = "RO-MAN 2021 Workshop on Trust, Acceptance and Social Cues in Human-Robot Interaction - SCRITA",
year = "2021",
abstract = "This position paper aims to highlight and discuss the role of a robot's social credibility in interaction with humans. In particular, I want to explore a potential relation between social credibility and a robot's acceptability and ultimately its trustworthiness. I thereby also review and expand the notion of social credibility as a measure of how well the robot obeys social norms during interaction with the concept of conscious acknowledgement.",
url = "https://arxiv.org/abs/2107.08805"
}
@inproceedings{Schulz2019c,
author = "Schulz, Trenton and Holthaus, Patrick",
title = "{Moving Robots Using the Slow in and Slow out Animation Principle}",
booktitle = "HRI 2019 Workshop on Expressivity for Sustained Human-Robot Interaction",
year = "2019",
address = "Daegu, South Korea",
abstract = "This extended abstract discusses the work done to move robots using the slow in, slow out animation principle and the set up for an experiment that has recently been completed. Results are still being analyzed."
}
@inproceedings{Schulz2019d,
author = "Schulz, Trenton and Soma, Rebekka and Holthaus, Patrick",
title = "{Stuck on You: How a Stuck Robot Affects Participants' Opinions}",
booktitle = "RO-MAN 2019 Workshop on Trust, Acceptance and Social Cues in Robot Interaction - SCRITA",
address = "New Delhi, India",
year = "2019",
abstract = "We examine some of the qualitative aspects of an experiment that examined people's perception of a robot based on a change of its motion. Specifically, we look at people's qualitative opinions when the robot gets ``stuck'' while navigating and corrects itself. This extended abstract presents preliminary results and themes that we wish to examine."
}
@inproceedings{Rossi2018b,
author = "Rossi, Alessandra and Holthaus, Patrick and Dautenhahn, Kerstin and Koay, Kheng Lee and Walters, Michael L.",
title = "{Programming Pepper: What can you make a humanoid robot do?}",
booktitle = "3rd Workshop on Behavior Adaptation, Interaction and Learning for Assistive Robotics (BAILAR 2018)",
address = "Nanjing, China",
year = "2018",
abstract = "The UK Robotics Week provided an opportunity to engage the UK nation's schools, colleges and universities in developing skills needed to drive the UK's technological future economy. Within this contest we decided to present a series of events to introduce school children to the state-of-art of social Human-Robot Interaction (HRI) and some currently adopted social cues. The students were exposed to three different types of HRI: a video HRI, a real live HRI and HRI programming of a robot. In particular, during the programming sessions, students were focused on the implementation of emotions in HRI. Future works will use the results collected during this event to investigate the impact of human perceptions of trust and acceptability of robots in Human-Robot Interactions."
}
@article{Rossi2024,
author = "Rossi, Alessandra and Holthaus, Patrick and Moros, Sílvia and Lakatos, Gabriella and Andriella, Antonio and Scheunemann, Marcus and van Maris, Anouk",
title = "{Trust, Acceptance and Social Cues in Human-Robot Interaction (SCRITA)}",
journal = "International Journal of Social Robotics",
year = "2024",
doi = "10.1007/s12369-024-01154-w"
}
@misc{Foerster2023c,
author = "Förster, Frank and Romeo, Marta and Holthaus, Patrick and Galvez Trigo, Maria J. and Fischer, Joel E. and Nesset, Birthe and Dondrup, Christian and Murad, Christine and Munteanu, Cosmin and Cowan, Benjamin R. and Clark, Leigh and Porcheron, Martin and Candello, Heloisa and Langevin, Raina",
doi = "10.48550/arXiv.2401.04108",
url = "https://arxiv.org/abs/2401.04108",
title = "{Working with Trouble and Failures in Conversation between Humans and Robots (WTF 2023) \& Is CUI Design Ready Yet?}",
publisher = "arXiv",
year = "2023",
copyright = "CC-BY"
}
@misc{Holthaus2023a,
author = "Holthaus, Patrick",
title = "{Interactive robots as inclusive tools to increase diversity in higher education}",
publisher = "arXiv",
year = "2023",
doi = "10.48550/arXiv.2303.01316",
url = "https://arxiv.org/abs/2303.01316",
copyright = "CC-BY"
}
@misc{Rossi2023a,
author = "Rossi, Alessandra and Holthaus, Patrick and Lakatos, Gabriella and Moros, Sílvia and Riches, Lewis",
doi = "10.48550/arXiv.2311.05401",
url = "https://arxiv.org/abs/2311.05401",
title = "{SCRITA 2023: Trust, Acceptance and Social Cues in Human-Robot Interaction}",
publisher = "arXiv",
year = "2023",
copyright = "arXiv.org perpetual, non-exclusive license"
}
@misc{Rossi2022,
author = "Rossi, Alessandra and Holthaus, Patrick and Moros, Sílvia and Lakatos, Gabriella",
doi = "10.48550/arXiv.2208.11090",
url = "https://arxiv.org/abs/2208.11090",
title = "{IEEE Trust, Acceptance and Social Cues in Human-Robot Interaction -- SCRITA 2022 Workshop}",
publisher = "arXiv",
year = "2022",
copyright = "Creative Commons Attribution 4.0 International"
}
@inproceedings{Shaw2022,
author = "Shaw, Patricia and Labrosse, Frédéric and Wilson, Myra and Jones, David and Akanyeti, Otar and Fearn, Tomos and Holthaus, Patrick and Coughlan, Jane-Lisa and Knight, Marianna",
title = "{\#UKRAS22: The 5th UK Robotics and Autonomous Systems Conference}",
booktitle = "Proceedings of the 5th UK-RAS Conference: Robotics for Unconstrained Environments (\#UKRAS22)",
address = "Aberystwyth, UK",
year = "2022",
pages = "1--2",
doi = "10.31256/Ha9Ys9D"
}
@inproceedings{Holthaus2021b,
author = "Holthaus, Patrick and Amirabdollahian, Farshid and Asher, Claire and Richards, Arthur",
title = "{\#UKRAS21: The 4th UK Robotics and Autonomous Systems Conference}",
booktitle = "Proceedings of the 4th UK-RAS Conference: Robotics at Home (\#UKRAS21)",
address = "Hatfield, UK",
year = "2021",
pages = "1--2",
doi = "10.31256/Ft3Ex7U"
}
@misc{Rossi2021a,
author = "Rossi, Alessandra and Holthaus, Patrick and Moros, Sílvia and Scheunemann, Marcus and Lakatos, Gabriella",
doi = "10.48550/arXiv.2108.08092",
url = "https://arxiv.org/abs/2108.08092",
title = "{Trust, Acceptance and Social Cues in Human-Robot Interaction -- SCRITA 2021}",
publisher = "arXiv",
year = "2021",
copyright = "arXiv.org perpetual, non-exclusive license"
}
@article{Rossi2021,
author = "Rossi, Alessandra and Holthaus, Patrick and Perugia, Giulia and Moros, Sílvia and Scheunemann, Marcus",
title = "{Trust, Acceptance and Social Cues in Human-Robot Interaction (SCRITA)}",
journal = "International Journal of Social Robotics",
volume = "13",
number = "8",
pages = "1833--1834",
year = "2021",
doi = "10.1007/s12369-021-00844-z"
}
@article{Rossi2019,
author = "Rossi, Alessandra and Koay, Kheng Lee and Moros, Sílvia and Holthaus, Patrick and Scheunemann, Marcus",
title = "{Social cues in robot interaction, trust and acceptance}",
journal = "Interaction Studies",
volume = "20",
number = "3",
pages = "391--392",
year = "2019",
publisher = "John Benjamins Publishing Company",
doi = "10.1075/is.20.3"
}
@article{Holthaus2016c,
author = "Holthaus, Patrick",
title = "{Community News: Conference Report LREC 2016}",
journal = "KI - Künstliche Intelligenz",
volume = "30",
number = "3",
pages = "349--354",
year = "2016",
doi = "10.1007/s13218-016-0447-7",
issn = "1610-1987"
}
@phdthesis{Holthaus2014b,
author = "Holthaus, Patrick",
school = "Bielefeld University",
title = "{Approaching Human-Like Spatial Awareness in Social Robotics - An Investigation of Spatial Interaction Strategies with a Receptionist Robot}",
type = "PhD Thesis",
year = "2014",
url = "https://pub.uni-bielefeld.de/record/2733038",
abstract = "This doctoral thesis investigates the influence of social signals in the spatial domain that aim to raise a robot's awareness towards its human interlocutor. A concept of spatial awareness thereby extends the robot's possibilities for expressing its knowledge about the situation as well as its own capabilities. As a result, especially untrained users can build up more appropriate expectations about the current situation which supposedly leads to a minimization of misunderstandings and thereby an enhancement of user experience. On the background of research that investigates communication among humans, relations are drawn in order to utilize gained insights for developing a robot that is capable of acting socially intelligent with regard to human-like treatment of spatial configurations and signals. In a study-driven approach, an integrated concept of spatial awareness is therefore proposed. An important aspect of that concept, which is founded in its spatial extent, lies in its aspiration to cover a holistic encounter between human and robot with the goal to improve user experience from the first sight until the end of reciprocal awareness. It describes how spatial configurations and signals can be perceived and interpreted in a social robot. Furthermore, it also presents signals and behavioral properties for such a robot that target at influencing said configurations and enhancing robot verbosity. In order to approve the concept's validity in realistic settings, an interactive scenario is presented in the form of a receptionist robot to which it is applied. In the context of this setup, a comprehensive user study is conducted that verifies the implementation of spatial awareness to be beneficial for an interaction with humans that are naive to the subject. Furthermore, the importance of addressing an entire encounter in human-robot interaction is confirmed as well as a strong interdependency of a robot's social signals among each other."
}