2024
|
| Hiroshi Furuya; Zubin Choudhary; Jasmine Joyce DeGuzman; Matt Gottsacker; Gerd Bruder; Greg Welch Using Simulated Real-world Terrain in VR to Study Outdoor AR Topographic Map Interfaces Proceedings Article Forthcoming In: Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2024), Tsukuba, Japan, December 1-3, 2024, pp. 1-10, Forthcoming. @inproceedings{Furuya2024topo,
title = {Using Simulated Real-world Terrain in VR to Study Outdoor AR Topographic Map Interfaces},
author = {Hiroshi Furuya and Zubin Choudhary and Jasmine Joyce DeGuzman and Matt Gottsacker and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2024/10/cameraready_ICAT_EGVE_2024_1029_topographic_map.pdf},
doi = {tbd},
year = {2024},
date = {2024-12-01},
urldate = {2024-12-01},
booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2024), Tsukuba, Japan, December 1-3, 2024},
pages = {1-10},
abstract = {Augmented reality (AR) technology enables advanced integration of spatial information useful in a variety of important domains, including reading topographic maps in the field. It is also important to understand how this technology may affect spatial learning ability. In this paper, we demonstrate the use of virtual reality (VR) to conduct a human-subject study investigating the impacts of different simulated AR topographic map interface designs on spatial learning outcomes. Our results show that interfaces that encourage engagement with the interface itself, rather than with the map and the environment, result in fast task completion times but poor spatial learning. We also found a participant preference for a novel interface design that assists users with map orientation without explicitly guiding the user through the task.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
Augmented reality (AR) technology enables advanced integration of spatial information useful in a variety of important domains, including reading topographic maps in the field. It is also important to understand how this technology may affect spatial learning ability. In this paper, we demonstrate the use of virtual reality (VR) to conduct a human-subject study investigating the impacts of different simulated AR topographic map interface designs on spatial learning outcomes. Our results show that interfaces that encourage engagement with the interface itself, rather than with the map and the environment, result in fast task completion times but poor spatial learning. We also found a participant preference for a novel interface design that assists users with map orientation without explicitly guiding the user through the task. |
| Zubin Choudhary; Laura Battistel; Raiffa Syamil; Hiroshi Furuya; Ferran Argelaguet; Gerd Bruder; Gregory F. Welch Examining the Effects of Teleportation on Semantic Memory of a Virtual Museum Compared to Natural Walking Proceedings Article Forthcoming In: Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2024), Tsukuba, Japan, December 1-3, 2024, pp. 1-12, Forthcoming. @inproceedings{Choudhary2024walking,
title = {Examining the Effects of Teleportation on Semantic Memory of a Virtual Museum Compared to Natural Walking},
author = {Zubin Choudhary and Laura Battistel and Raiffa Syamil and Hiroshi Furuya and Ferran Argelaguet and Gerd Bruder and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2024/10/paper1028_2.pdf},
year = {2024},
date = {2024-12-01},
urldate = {2024-12-01},
booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2024), Tsukuba, Japan, December 1-3, 2024},
pages = {1-12},
abstract = {Over the past decades, there has been extensive research investigating the trade-offs between various Virtual Reality (VR) locomotion techniques. One of the most highly researched techniques is teleportation, due to its ability to quickly traverse large virtual spaces even in limited physical tracking spaces. The majority of teleportation research has focused on its effects on spatial cognition, such as spatial understanding and retention. However, relatively little is known about whether the use of teleportation in immersive learning experiences can affect the acquisition of semantic knowledge — our knowledge about facts, concepts, and ideas — which is essential for long-term learning. In this paper, we present a human-subjects study to investigate the effects of teleportation compared to natural walking on the retention of semantic information about artifacts in a virtual museum. Participants visited unique 3D artifacts accompanied by audio clips and artifact names. Our results show that participants reached the same semantic memory performance with both locomotion techniques but with different behaviors, self-assessed performance, and preference. In particular, participants subjectively indicated that they felt they recalled more semantic information with walking than with teleportation. However, objectively, they spent more time with the artifacts while walking, meaning that they learned less in a given amount of time than with teleportation. We discuss the relationships, implications, and guidelines for VR experiences designed to help users acquire new knowledge.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inproceedings}
}
Over the past decades, there has been extensive research investigating the trade-offs between various Virtual Reality (VR) locomotion techniques. One of the most highly researched techniques is teleportation, due to its ability to quickly traverse large virtual spaces even in limited physical tracking spaces. The majority of teleportation research has focused on its effects on spatial cognition, such as spatial understanding and retention. However, relatively little is known about whether the use of teleportation in immersive learning experiences can affect the acquisition of semantic knowledge — our knowledge about facts, concepts, and ideas — which is essential for long-term learning. In this paper, we present a human-subjects study to investigate the effects of teleportation compared to natural walking on the retention of semantic information about artifacts in a virtual museum. Participants visited unique 3D artifacts accompanied by audio clips and artifact names. Our results show that participants reached the same semantic memory performance with both locomotion techniques but with different behaviors, self-assessed performance, and preference. In particular, participants subjectively indicated that they felt they recalled more semantic information with walking than with teleportation. However, objectively, they spent more time with the artifacts while walking, meaning that they learned less in a given amount of time than with teleportation. We discuss the relationships, implications, and guidelines for VR experiences designed to help users acquire new knowledge. |
| Hiroshi Furuya; Laura Battistel; Zubin Datta Choudhary; Matt Gottsacker; Gerd Bruder; Gregory F Welch Difficulties in Perceiving and Understanding Robot Reliability Changes in a Sequential Binary Task Proceedings Article In: Proceedings of the 2024 ACM Symposium on Spatial User Interaction, pp. 1-11, Association for Computing Machinery, Trier, Germany, 2024, ISBN: 9798400710889. @inproceedings{Furuya2024perceive,
title = {Difficulties in Perceiving and Understanding Robot Reliability Changes in a Sequential Binary Task},
author = {Hiroshi Furuya and Laura Battistel and Zubin Datta Choudhary and Matt Gottsacker and Gerd Bruder and Gregory F Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2024/08/SUI_2024___Difficulties_in_Perceiving_and_Understanding_Robot_Reliability_Changes_in_a_Sequential_Binary_Task-1-2.pdf},
doi = {10.1145/3677386.3682083},
isbn = {9798400710889},
year = {2024},
date = {2024-10-07},
urldate = {2024-10-07},
booktitle = {Proceedings of the 2024 ACM Symposium on Spatial User Interaction},
pages = {1-11},
publisher = {Association for Computing Machinery},
address = {Trier, Germany},
series = {SUI '24},
abstract = {Human-robot teams push the boundaries of what both humans and robots can accomplish. For the team to function well, the human must accurately assess the robot’s capabilities to calibrate the trust between the human and robot. In this paper, we use virtual reality (VR), a widely accepted tool for studying human-robot interaction (HRI), to study human behaviors affecting the detection and understanding of changes in a simulated robot’s reliability. We present a human-subject study examining how different reliability change factors may affect this process. Our results demonstrate that participants make judgments about robot reliability before they have accumulated sufficient evidence to make objectively high-confidence inferences about it. We show that this reliability change observation behavior diverges from the behavior expected based on the probability distribution functions used to describe observation outcomes.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Human-robot teams push the boundaries of what both humans and robots can accomplish. For the team to function well, the human must accurately assess the robot’s capabilities to calibrate the trust between the human and robot. In this paper, we use virtual reality (VR), a widely accepted tool for studying human-robot interaction (HRI), to study human behaviors affecting the detection and understanding of changes in a simulated robot’s reliability. We present a human-subject study examining how different reliability change factors may affect this process. Our results demonstrate that participants make judgments about robot reliability before they have accumulated sufficient evidence to make objectively high-confidence inferences about it. We show that this reliability change observation behavior diverges from the behavior expected based on the probability distribution functions used to describe observation outcomes. |
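The gap this study highlights, between when observers feel they know a robot's reliability and when the accumulated binary outcomes objectively support such a judgment, can be pictured with a small worked example. The sketch below is not from the paper; it simply applies a standard Beta-Binomial model to hypothetical outcome counts to show how wide the credible interval on reliability still is after a short observation run.

```python
from scipy.stats import beta

def reliability_posterior(successes: int, failures: int, prior_a=1.0, prior_b=1.0):
    """Beta posterior over a robot's success probability after a run of
    sequential binary outcomes, starting from a uniform Beta(1, 1) prior."""
    return beta(prior_a + successes, prior_b + failures)

# Hypothetical observation run: 8 successes, 2 failures.
post = reliability_posterior(successes=8, failures=2)
lo, hi = post.interval(0.95)  # 95% credible interval for reliability
print(f"mean reliability ~ {post.mean():.2f}, 95% interval [{lo:.2f}, {hi:.2f}]")
# After only 10 observations the interval is still wide: it contains both a
# roughly 60%-reliable and a roughly 90%-reliable robot, so the evidence does
# not yet objectively distinguish them, yet observers often judge this early.
```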
| Matt Gottsacker; Hiroshi Furuya; Zubin Choudhary; Austin Erickson; Ryan Schubert; Gerd Bruder; Michael P. Browne; Gregory F. Welch Investigating the relationships between user behaviors and tracking factors on task performance and trust in augmented reality Journal Article In: Elsevier Computers & Graphics, vol. 123, pp. 1-14, 2024. @article{gottsacker2024trust,
title = {Investigating the relationships between user behaviors and tracking factors on task performance and trust in augmented reality},
author = {Matt Gottsacker and Hiroshi Furuya and Zubin Choudhary and Austin Erickson and Ryan Schubert and Gerd Bruder and Michael P. Browne and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2024/08/C_G____ARTrust____Accuracy___Precision.pdf},
doi = {10.1016/j.cag.2024.104035},
year = {2024},
date = {2024-08-06},
urldate = {2024-08-06},
journal = {Elsevier Computers & Graphics},
volume = {123},
pages = {1-14},
abstract = {This research paper explores the impact of augmented reality (AR) tracking characteristics, specifically an AR head-worn display's tracking registration accuracy and precision, on users' spatial abilities and subjective perceptions of trust in and reliance on the technology. Our study aims to clarify the relationships between user performance and the different behaviors users may employ based on varying degrees of trust in and reliance on AR. Our controlled experimental setup used a 360-degree field-of-regard search-and-selection task and combined the immersive aspects of a CAVE-like environment with AR overlays viewed through a head-worn display. We investigated three levels of simulated AR tracking errors in terms of both accuracy and precision (+0, +1, +2). We controlled for four user task behaviors that correspond to different levels of trust in and reliance on an AR system: AR-Only (only relying on AR), AR-First (prioritizing AR over the real world), Real-Only (only relying on the real world), and Real-First (prioritizing the real world over AR). By controlling for these behaviors, our results showed that even small amounts of AR tracking error had noticeable effects on users' task performance, especially if they relied completely on the AR cues (AR-Only). Our results link AR tracking characteristics with user behavior, highlighting the importance of understanding these elements to improve AR technology and user satisfaction.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
This research paper explores the impact of augmented reality (AR) tracking characteristics, specifically an AR head-worn display's tracking registration accuracy and precision, on users' spatial abilities and subjective perceptions of trust in and reliance on the technology. Our study aims to clarify the relationships between user performance and the different behaviors users may employ based on varying degrees of trust in and reliance on AR. Our controlled experimental setup used a 360-degree field-of-regard search-and-selection task and combined the immersive aspects of a CAVE-like environment with AR overlays viewed through a head-worn display. We investigated three levels of simulated AR tracking errors in terms of both accuracy and precision (+0, +1, +2). We controlled for four user task behaviors that correspond to different levels of trust in and reliance on an AR system: AR-Only (only relying on AR), AR-First (prioritizing AR over the real world), Real-Only (only relying on the real world), and Real-First (prioritizing the real world over AR). By controlling for these behaviors, our results showed that even small amounts of AR tracking error had noticeable effects on users' task performance, especially if they relied completely on the AR cues (AR-Only). Our results link AR tracking characteristics with user behavior, highlighting the importance of understanding these elements to improve AR technology and user satisfaction. |
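The two tracking factors manipulated here map naturally onto how registration error is commonly modeled: accuracy as a constant offset (bias) of the overlay from its true position, and precision as trial-to-trial jitter around that offset. The snippet below is a hedged illustration of that decomposition with made-up magnitudes and a made-up function name; it is not the paper's error levels or code.

```python
import numpy as np

def perturb_overlay(true_pos, bias_deg=(0.5, 0.0), jitter_sd_deg=0.3, rng=None):
    """Perturb an AR overlay's angular position (degrees) with a fixed bias
    (accuracy error) plus zero-mean Gaussian jitter (precision error)."""
    rng = rng or np.random.default_rng()
    bias = np.asarray(bias_deg)                  # systematic offset: accuracy
    jitter = rng.normal(0.0, jitter_sd_deg, 2)   # per-frame noise: precision
    return np.asarray(true_pos) + bias + jitter

# Example: a cue that should sit at azimuth 30 degrees, elevation 5 degrees.
print(perturb_overlay((30.0, 5.0)))
```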
| Gerd Bruder; Michael Browne; Zubin Choudhary; Austin Erickson; Hiroshi Furuya; Matt Gottsacker; Ryan Schubert; Gregory Welch Visual Factors Influencing Trust and Reliance with Augmented Reality Systems Journal Article In: Journal of Vision Abstracts—Vision Sciences Society (VSS) Annual Meeting, 2024. @article{Bruder2024,
title = {Visual Factors Influencing Trust and Reliance with Augmented Reality Systems},
author = {Gerd Bruder and Michael Browne and Zubin Choudhary and Austin Erickson and Hiroshi Furuya and Matt Gottsacker and Ryan Schubert and Gregory Welch},
year = {2024},
date = {2024-05-17},
urldate = {2024-05-17},
journal = {Journal of Vision Abstracts—Vision Sciences Society (VSS) Annual Meeting},
abstract = {Augmented Reality (AR) systems are increasingly used for simulations, training, and operations across a wide range of application fields. Unfortunately, the imagery that current AR systems create often does not match our visual perception of the real world, which can make users feel that the AR system is not believable. This lack of believability can lead to negative training outcomes or experiences, where users lose trust in the AR system and adjust their reliance on AR. The latter is characterized by users adopting different cognitive perception-action pathways by which they integrate AR visual information for spatial tasks. In this work, we present a series of six within-subjects experiments (each N=20) in which we investigated trust in AR with respect to two display factors (field of view and visual contrast), two tracking factors (accuracy and precision), and two network factors (latency and dropouts). Participants performed a 360-degree visual search-and-selection task in a hybrid setup involving an AR head-mounted display and a CAVE-like simulated real environment. Participants completed the experiments with four perception-action pathways that represent different levels of the users’ reliance on an AR system: AR-Only (only relying on AR), AR-First (prioritizing AR over the real world), Real-First (prioritizing the real world over AR), and Real-Only (only relying on the real world). Our results show that participants’ perception-action pathways and objective task performance were significantly affected by all six tested AR factors. In contrast, we found that their subjective responses for trust and reliance were often affected by AR system differences too slight to elicit objective performance differences, and participants tended to overestimate or underestimate the trustworthiness of the AR system. Participants showed significantly higher task performance gains if their sense of trust was well-calibrated to the trustworthiness of the AR system, highlighting the importance of effectively managing users’ trust in future AR systems.
Acknowledgements: This material includes work supported in part by Vision Products LLC via US Air Force Research Laboratory (AFRL) Award Number FA864922P1038, and the Office of Naval Research under Award Numbers N00014-21-1-2578 and N00014-21-1-2882 (Dr. Peter Squire, Code 34).},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Augmented Reality (AR) systems are increasingly used for simulations, training, and operations across a wide range of application fields. Unfortunately, the imagery that current AR systems create often does not match our visual perception of the real world, which can make users feel that the AR system is not believable. This lack of believability can lead to negative training outcomes or experiences, where users lose trust in the AR system and adjust their reliance on AR. The latter is characterized by users adopting different cognitive perception-action pathways by which they integrate AR visual information for spatial tasks. In this work, we present a series of six within-subjects experiments (each N=20) in which we investigated trust in AR with respect to two display factors (field of view and visual contrast), two tracking factors (accuracy and precision), and two network factors (latency and dropouts). Participants performed a 360-degree visual search-and-selection task in a hybrid setup involving an AR head-mounted display and a CAVE-like simulated real environment. Participants completed the experiments with four perception-action pathways that represent different levels of the users’ reliance on an AR system: AR-Only (only relying on AR), AR-First (prioritizing AR over the real world), Real-First (prioritizing the real world over AR), and Real-Only (only relying on the real world). Our results show that participants’ perception-action pathways and objective task performance were significantly affected by all six tested AR factors. In contrast, we found that their subjective responses for trust and reliance were often affected by AR system differences too slight to elicit objective performance differences, and participants tended to overestimate or underestimate the trustworthiness of the AR system. Participants showed significantly higher task performance gains if their sense of trust was well-calibrated to the trustworthiness of the AR system, highlighting the importance of effectively managing users’ trust in future AR systems.
Acknowledgements: This material includes work supported in part by Vision Products LLC via US Air Force Research Laboratory (AFRL) Award Number FA864922P1038, and the Office of Naval Research under Award Numbers N00014-21-1-2578 and N00014-21-1-2882 (Dr. Peter Squire, Code 34). |
2023
|
| Zubin Choudhary; Gerd Bruder; Gregory F. Welch Visual Facial Enhancements Can Significantly Improve Speech Perception in the Presence of Noise Journal Article In: IEEE Transactions on Visualization and Computer Graphics, Special Issue on the IEEE International Symposium on Mixed and Augmented Reality (ISMAR) 2023., 2023. @article{Choudhary2023Speech,
title = {Visual Facial Enhancements Can Significantly Improve Speech Perception in the Presence of Noise},
author = {Zubin Choudhary and Gerd Bruder and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2023/07/final_sub1046_ISMAR23-compressed.pdf},
year = {2023},
date = {2023-10-17},
urldate = {2023-10-17},
journal = {IEEE Transactions on Visualization and Computer Graphics, Special Issue on the IEEE International Symposium on Mixed and Augmented Reality (ISMAR) 2023.},
abstract = {Human speech perception is generally optimal in quiet environments; however, it becomes more difficult and error-prone in the presence of noise, such as other humans speaking nearby or ambient noise. In such situations, human speech perception is improved by speech reading, i.e., watching the movements of a speaker’s mouth and face, either consciously, as done by people with hearing loss, or subconsciously by other humans. While previous work focused largely on speech perception with two-dimensional videos of faces, there is a gap in research on facial features as seen in head-mounted displays, including the impact of display resolution and the effectiveness of visually enhancing a virtual human face on speech perception in the presence of noise.
In this paper, we present a comparative user study (N = 21) in which we investigated an audio-only condition compared to two levels of head-mounted display resolution (1832×1920 or 916×960 pixels per eye) and two levels of the native or visually enhanced appearance of a virtual human, the latter consisting of an up-scaled facial representation and simulated lipstick (lip coloring) added to increase contrast. To understand effects on speech perception in noise, we measured participants’ speech reception thresholds (SRTs) for each audio-visual stimulus condition. These thresholds indicate the decibel levels of the speech signal that are necessary for a listener to receive the speech correctly 50% of the time. First, we show that the display resolution significantly affected participants’ ability to perceive the speech signal in noise, which has practical implications for the field, especially in social virtual environments. Second, we show that our visual enhancement method was able to compensate for limited display resolution and was generally preferred by participants. Specifically, our participants indicated that they benefited from the head scaling more than the added facial contrast from the simulated lipstick. We discuss relationships, implications, and guidelines for applications that aim to leverage such enhancements.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Human speech perception is generally optimal in quiet environments; however, it becomes more difficult and error-prone in the presence of noise, such as other humans speaking nearby or ambient noise. In such situations, human speech perception is improved by speech reading, i.e., watching the movements of a speaker’s mouth and face, either consciously, as done by people with hearing loss, or subconsciously by other humans. While previous work focused largely on speech perception with two-dimensional videos of faces, there is a gap in research on facial features as seen in head-mounted displays, including the impact of display resolution and the effectiveness of visually enhancing a virtual human face on speech perception in the presence of noise.
In this paper, we present a comparative user study (N = 21) in which we investigated an audio-only condition compared to two levels of head-mounted display resolution (1832×1920 or 916×960 pixels per eye) and two levels of the native or visually enhanced appearance of a virtual human, the latter consisting of an up-scaled facial representation and simulated lipstick (lip coloring) added to increase contrast. To understand effects on speech perception in noise, we measured participants’ speech reception thresholds (SRTs) for each audio-visual stimulus condition. These thresholds indicate the decibel levels of the speech signal that are necessary for a listener to receive the speech correctly 50% of the time. First, we show that the display resolution significantly affected participants’ ability to perceive the speech signal in noise, which has practical implications for the field, especially in social virtual environments. Second, we show that our visual enhancement method was able to compensate for limited display resolution and was generally preferred by participants. Specifically, our participants indicated that they benefited from the head scaling more than the added facial contrast from the simulated lipstick. We discuss relationships, implications, and guidelines for applications that aim to leverage such enhancements. |
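The speech reception threshold (SRT) described above, the signal level at which a listener receives the speech correctly 50% of the time, is commonly estimated with a simple 1-up/1-down adaptive staircase, which converges on the 50% point of the psychometric function. The sketch below is a generic illustration of such a procedure, not the authors' implementation; the step size, trial count, and simulated listener are hypothetical.

```python
import random

def run_srt_staircase(trial_correct, start_db=0.0, step_db=2.0, n_trials=20):
    """Generic 1-up/1-down adaptive staircase: lower the speech level after a
    correct response, raise it after an error; this tracks the 50%-correct
    point, i.e., the speech reception threshold (SRT)."""
    level = start_db
    reversals, last_direction = [], None
    for _ in range(n_trials):
        correct = trial_correct(level)
        direction = -1 if correct else +1          # harder if correct, easier if not
        if last_direction is not None and direction != last_direction:
            reversals.append(level)                # level at which the track turned
        level += direction * step_db
        last_direction = direction
    tail = reversals[-6:]                          # average the last few reversals
    return sum(tail) / max(len(tail), 1)

# Hypothetical simulated listener whose true 50% point sits at -6 dB SNR.
listener = lambda snr_db: random.random() < 1 / (1 + 10 ** (-(snr_db + 6.0) / 4.0))
print(f"estimated SRT ~ {run_srt_staircase(listener):.1f} dB SNR")
```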
| Zubin Choudhary; Gerd Bruder; Greg Welch Visual Hearing Aids: Artificial Visual Speech Stimuli for Audiovisual Speech Perception in Noise Conference Proceedings of the 29th ACM Symposium on Virtual Reality Software and Technology, 2023, 2023. @conference{Choudhary2023aids,
title = {Visual Hearing Aids: Artificial Visual Speech Stimuli for Audiovisual Speech Perception in Noise},
author = {Zubin Choudhary and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2023/09/MAIN_VRST_23_SpeechPerception_Phone.pdf},
year = {2023},
date = {2023-10-09},
urldate = {2023-10-09},
booktitle = {Proceedings of the 29th ACM Symposium on Virtual Reality Software and Technology, 2023},
abstract = {Speech perception is optimal in quiet environments, but noise can impair comprehension and increase errors. In these situations, lip reading can help, but it is not always possible, such as during an audio call or when wearing a face mask. One approach to improve speech perception in these situations is to use an artificial visual lip reading aid. In this paper, we present a user study (N = 17) in which we compared three levels of audio stimulus visualization and two levels of modulating the appearance of the visualization based on the speech signal, and we compared them against two control conditions: an audio-only condition, and a real human speaking. We measured participants’ speech reception thresholds (SRTs) to understand the effects of these visualizations on speech perception in noise. These thresholds indicate the decibel levels of the speech signal that are necessary for a listener to receive the speech correctly 50% of the time. Additionally, we measured the usability of the approaches and the user experience. We found that the different artificial visualizations improved participants’ speech reception compared to the audio-only baseline condition, but reception remained significantly poorer than in the real human condition. This suggests that different visualizations can improve speech perception when the speaker’s face is not available. We also discuss limitations of current plug-and-play lip sync software and abstract representations of the speaker in the context of speech perception.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Speech perception is optimal in quiet environments, but noise can impair comprehension and increase errors. In these situations, lip reading can help, but it is not always possible, such as during an audio call or when wearing a face mask. One approach to improve speech perception in these situations is to use an artificial visual lip reading aid. In this paper, we present a user study (N = 17) in which we compared three levels of audio stimulus visualization and two levels of modulating the appearance of the visualization based on the speech signal, and we compared them against two control conditions: an audio-only condition, and a real human speaking. We measured participants’ speech reception thresholds (SRTs) to understand the effects of these visualizations on speech perception in noise. These thresholds indicate the decibel levels of the speech signal that are necessary for a listener to receive the speech correctly 50% of the time. Additionally, we measured the usability of the approaches and the user experience. We found that the different artificial visualizations improved participants’ speech reception compared to the audio-only baseline condition, but reception remained significantly poorer than in the real human condition. This suggests that different visualizations can improve speech perception when the speaker’s face is not available. We also discuss limitations of current plug-and-play lip sync software and abstract representations of the speaker in the context of speech perception. |
| Zubin Choudhary; Nahal Norouzi; Austin Erickson; Ryan Schubert; Gerd Bruder; Gregory F. Welch Exploring the Social Influence of Virtual Humans Unintentionally Conveying Conflicting Emotions Conference Proceedings of the 30th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE VR 2023, 2023. @conference{Choudhary2023,
title = {Exploring the Social Influence of Virtual Humans Unintentionally Conveying Conflicting Emotions},
author = {Zubin Choudhary and Nahal Norouzi and Austin Erickson and Ryan Schubert and Gerd Bruder and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2023/01/PostReview_ConflictingEmotions_IEEEVR23-1.pdf},
year = {2023},
date = {2023-03-29},
urldate = {2023-03-29},
booktitle = {Proceedings of the 30th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE VR 2023},
abstract = {The expression of human emotion is integral to social interaction, and in virtual reality it is increasingly common to develop virtual avatars that attempt to convey emotions by mimicking visual and aural cues, i.e., facial and vocal expressions. However, errors in (or the absence of) facial tracking can result in the rendering of incorrect facial expressions on these virtual avatars. For example, a virtual avatar may speak with a happy or unhappy vocal inflection while their facial expression remains otherwise neutral. In circumstances where there is conflict between the avatar’s facial and vocal expressions, it is possible that users will incorrectly interpret the avatar’s emotion, which may have unintended consequences in terms of social influence or in terms of the outcome of the interaction.
In this paper, we present a human-subjects study (N = 22) aimed at understanding the impact of conflicting facial and vocal emotional expressions. Specifically, we explored three levels of emotional valence (unhappy, neutral, and happy) expressed in both visual (facial) and aural (vocal) forms. We also investigated three levels of head scale (down-scaled, accurate, and up-scaled) to evaluate whether head scale affects user interpretation of the conveyed emotion. We found significant effects of different multimodal expressions on happiness and trust perception, while no significant effect was observed for head scale. Evidence from our results suggests that facial expressions have a stronger impact than vocal expressions. Additionally, as the difference between the two expressions increases, the multimodal expression becomes less predictable. For example, for the happy-looking and happy-sounding multimodal expression, we expected and observed high happiness ratings and high trust; however, if one of the two expressions changes, the mismatch makes the expression less predictable. We discuss the relationships, implications, and guidelines for social applications that aim to leverage multimodal social cues.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
The expression of human emotion is integral to social interaction, and in virtual reality it is increasingly common to develop virtual avatars that attempt to convey emotions by mimicking visual and aural cues, i.e., facial and vocal expressions. However, errors in (or the absence of) facial tracking can result in the rendering of incorrect facial expressions on these virtual avatars. For example, a virtual avatar may speak with a happy or unhappy vocal inflection while their facial expression remains otherwise neutral. In circumstances where there is conflict between the avatar’s facial and vocal expressions, it is possible that users will incorrectly interpret the avatar’s emotion, which may have unintended consequences in terms of social influence or in terms of the outcome of the interaction.
In this paper, we present a human-subjects study (N = 22) aimed at understanding the impact of conflicting facial and vocal emotional expressions. Specifically, we explored three levels of emotional valence (unhappy, neutral, and happy) expressed in both visual (facial) and aural (vocal) forms. We also investigated three levels of head scale (down-scaled, accurate, and up-scaled) to evaluate whether head scale affects user interpretation of the conveyed emotion. We found significant effects of different multimodal expressions on happiness and trust perception, while no significant effect was observed for head scale. Evidence from our results suggests that facial expressions have a stronger impact than vocal expressions. Additionally, as the difference between the two expressions increases, the multimodal expression becomes less predictable. For example, for the happy-looking and happy-sounding multimodal expression, we expected and observed high happiness ratings and high trust; however, if one of the two expressions changes, the mismatch makes the expression less predictable. We discuss the relationships, implications, and guidelines for social applications that aim to leverage multimodal social cues. |
2022
|
| Zubin Choudhary; Austin Erickson; Nahal Norouzi; Kangsoo Kim; Gerd Bruder; Greg Welch Virtual Big Heads in Extended Reality: Estimation of Ideal Head Scales and Perceptual Thresholds for Comfort and Facial Cues Journal Article In: ACM Transactions on Applied Perception, 2022. @article{Choudhary2022,
title = {Virtual Big Heads in Extended Reality: Estimation of Ideal Head Scales and Perceptual Thresholds for Comfort and Facial Cues},
author = {Zubin Choudhary and Austin Erickson and Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Greg Welch},
url = {https://drive.google.com/file/d/1jdxwLchDH0RPouVENoSx8iSOyDmJhqKb/view?usp=sharing},
year = {2022},
date = {2022-11-02},
urldate = {2022-11-02},
journal = {ACM Transactions on Applied Perception},
abstract = {Extended reality (XR) technologies, such as virtual reality (VR) and augmented reality (AR), provide users, their avatars, and embodied agents with a shared platform to collaborate in a spatial context. Traditional face-to-face communication is limited by users' proximity, meaning that another human's non-verbal embodied cues become more difficult to perceive the farther one is away from that person. In this paper, we describe and evaluate the ``Big Head'' technique, in which a human's head in VR/AR is scaled up relative to their distance from the observer as a mechanism for enhancing the visibility of non-verbal facial cues, such as facial expressions or eye gaze. To better understand and explore this technique, we present two complementary human-subject experiments.
In our first experiment, we conducted a VR study with a head-mounted display (HMD) to understand the impact of increased or decreased head scales on participants' ability to perceive facial expressions as well as their sense of comfort and feeling of ``uncanniness'' over distances of up to 10 meters. We explored two different scaling methods and compared perceptual thresholds and user preferences. Our second experiment was performed in an outdoor AR environment with an optical see-through (OST) HMD. Participants were asked to estimate facial expressions and eye gaze, and to identify a virtual human over large distances of 30, 60, and 90 meters. In both experiments, our results show significant differences in minimum, maximum, and ideal head scales for different distances and tasks related to perceiving faces, facial expressions, and eye gaze, and we also found that participants were more comfortable with slightly bigger heads at larger distances. We discuss our findings with respect to the technologies used, and we discuss implications and guidelines for practical applications that aim to leverage XR-enhanced facial cues.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Extended reality (XR) technologies, such as virtual reality (VR) and augmented reality (AR), provide users, their avatars, and embodied agents with a shared platform to collaborate in a spatial context. Traditional face-to-face communication is limited by users' proximity, meaning that another human's non-verbal embodied cues become more difficult to perceive the farther one is away from that person. In this paper, we describe and evaluate the "Big Head" technique, in which a human's head in VR/AR is scaled up relative to their distance from the observer as a mechanism for enhancing the visibility of non-verbal facial cues, such as facial expressions or eye gaze. To better understand and explore this technique, we present two complementary human-subject experiments.
In our first experiment, we conducted a VR study with a head-mounted display (HMD) to understand the impact of increased or decreased head scales on participants' ability to perceive facial expressions as well as their sense of comfort and feeling of "uncanniness" over distances of up to 10 meters. We explored two different scaling methods and compared perceptual thresholds and user preferences. Our second experiment was performed in an outdoor AR environment with an optical see-through (OST) HMD. Participants were asked to estimate facial expressions and eye gaze, and to identify a virtual human over large distances of 30, 60, and 90 meters. In both experiments, our results show significant differences in minimum, maximum, and ideal head scales for different distances and tasks related to perceiving faces, facial expressions, and eye gaze, and we also found that participants were more comfortable with slightly bigger heads at larger distances. We discuss our findings with respect to the technologies used, and we discuss implications and guidelines for practical applications that aim to leverage XR-enhanced facial cues. |
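A minimal way to picture the Big Head technique is a head-scale factor that grows with viewing distance and is clamped to the kind of per-distance minimum, maximum, and ideal scales the experiments estimate. The sketch below is an illustrative linear scaling with placeholder thresholds, not the scaling functions or values reported in the paper.

```python
def big_head_scale(distance_m, base_distance_m=2.0, min_scale=1.0, max_scale=3.0):
    """Scale a virtual head up with distance so facial cues stay legible:
    1x at or below a base conversational distance, growing linearly beyond it,
    clamped to a comfort ceiling. All constants here are placeholders."""
    raw = max(distance_m / base_distance_m, 1.0)   # linear growth past base distance
    return min(max(raw, min_scale), max_scale)

for d in (2, 10, 30, 60, 90):
    print(f"{d:>3} m -> head scale {big_head_scale(d):.2f}x")
```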
2021
|
| Zubin Choudhary; Jesus Ugarte; Gerd Bruder; Greg Welch Real-Time Magnification in Augmented Reality Conference Proceedings of the 2021 ACM Symposium on Spatial User Interaction, SUI 2021 ACM 2021. @conference{Choudhary2021d,
title = {Real-Time Magnification in Augmented Reality},
author = {Zubin Choudhary and Jesus Ugarte and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/09/SUI2021_AR_Magnification_DEMO.pdf},
year = {2021},
date = {2021-11-09},
urldate = {2021-11-09},
booktitle = {Proceedings of the 2021 ACM Symposium on Spatial User Interaction},
pages = {1-2},
organization = {ACM},
series = {SUI 2021},
abstract = {With recent advances in augmented reality (AR) and computer vision, it has become possible to magnify objects in real time in a user’s field of view. AR object magnification can serve different purposes, such as enhancing human visual capabilities with the BigHead technique, which works by up-scaling human heads to communicate important facial cues over longer distances. For this purpose, we created a prototype with a 4K camera mounted on a HoloLens 2. In this demo, we present the BigHead technique and a proof-of-concept AR testbed to magnify heads in real time. Further, we describe how hand gestures are detected to control the scale and position of the magnified head. We discuss the technique and implementation, and propose future research directions.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
With recent advances in augmented reality (AR) and computer vision, it has become possible to magnify objects in real time in a user’s field of view. AR object magnification can serve different purposes, such as enhancing human visual capabilities with the BigHead technique, which works by up-scaling human heads to communicate important facial cues over longer distances. For this purpose, we created a prototype with a 4K camera mounted on a HoloLens 2. In this demo, we present the BigHead technique and a proof-of-concept AR testbed to magnify heads in real time. Further, we describe how hand gestures are detected to control the scale and position of the magnified head. We discuss the technique and implementation, and propose future research directions. |
| Connor D. Flick; Courtney J. Harris; Nikolas T. Yonkers; Nahal Norouzi; Austin Erickson; Zubin Choudhary; Matt Gottsacker; Gerd Bruder; Gregory F. Welch Trade-offs in Augmented Reality User Interfaces for Controlling a Smart Environment Proceedings Article In: Symposium on Spatial User Interaction (SUI '21), pp. 1-11, Association for Computing Machinery, New York, NY, USA, 2021. @inproceedings{Flick2021,
title = {Trade-offs in Augmented Reality User Interfaces for Controlling a Smart Environment},
author = {Connor D. Flick and Courtney J. Harris and Nikolas T. Yonkers and Nahal Norouzi and Austin Erickson and Zubin Choudhary and Matt Gottsacker and Gerd Bruder and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/09/SUI2021_REU_Paper.pdf},
year = {2021},
date = {2021-11-09},
urldate = {2021-11-09},
booktitle = {Symposium on Spatial User Interaction (SUI '21)},
pages = {1-11},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Smart devices and Internet of Things (IoT) technologies are replacing or being incorporated into traditional devices at a growing pace. The use of digital interfaces to interact with these devices has become a common occurrence in homes, work spaces, and various industries around the world. The most common interfaces for these connected devices focus on mobile apps or voice control via intelligent virtual assistants. However, with augmented reality (AR) becoming more popular and accessible among consumers, there are new opportunities for spatial user interfaces to seamlessly bridge the gap between digital and physical affordances.
In this paper, we present a human-subject study evaluating and comparing four user interfaces for smart connected environments: gaze input, hand gestures, voice input, and a mobile app. We assessed participants’ user experience, usability, task load, completion time, and preferences. Our results show multiple trade-offs between these interfaces across these measures. In particular, we found that gaze input shows great potential for future use cases, while both gaze input and hand gestures suffer from limited familiarity among users, compared to voice input and mobile apps.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Smart devices and Internet of Things (IoT) technologies are replacing or being incorporated into traditional devices at a growing pace. The use of digital interfaces to interact with these devices has become a common occurrence in homes, work spaces, and various industries around the world. The most common interfaces for these connected devices focus on mobile apps or voice control via intelligent virtual assistants. However, with augmented reality (AR) becoming more popular and accessible among consumers, there are new opportunities for spatial user interfaces to seamlessly bridge the gap between digital and physical affordances.
In this paper, we present a human-subject study evaluating and comparing four user interfaces for smart connected environments: gaze input, hand gestures, voice input, and a mobile app. We assessed participants’ user experience, usability, task load, completion time, and preferences. Our results show multiple trade-offs between these interfaces across these measures. In particular, we found that gaze input shows great potential for future use cases, while both gaze input and hand gestures suffer from limited familiarity among users, compared to voice input and mobile apps. |
| Zubin Choudhary [DC] Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality Proceedings Article In: pp. 4, IEEE 2021. @inproceedings{Choudhary2021bb,
title = {[DC] Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality},
author = {Zubin Choudhary},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/ISMAR_DC_2021-ver2.pdf},
year = {2021},
date = {2021-10-08},
urldate = {2021-10-08},
pages = {4},
organization = {IEEE},
series = {ISMAR 2021},
abstract = {Existing literature in the field of extended reality has demonstrated that visual/auditory manipulations can change a person’s perception and behavior. For example, a mismatch between the physical self and the virtual self can have psychological and behavioral implications. There are many different approaches that can incur a perceptual change. An under-explored area of research is gradual and subtle manipulations, such as scaling the food one eats or scaling the heads of people one sees. In this position paper, I provide an overview of my prior PhD research focusing on means to gradually and seamlessly scale visual and auditory stimuli in extended reality, and investigations of the corresponding changes in human perception. I discuss future research topics and potential questions to be discussed at the ISMAR 2021 Doctoral Consortium.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Existing literature in the field of extended reality has demonstrated that visual/auditory manipulations can change a person’s perception and behavior. For example, a mismatch between the physical self and the virtual self can have psychological and behavioral implications. There are many different approaches that can incur a perceptual change. An under-explored area of research is gradual and subtle manipulations, such as scaling the food one eats or scaling the heads of people one sees. In this position paper, I provide an overview of my prior PhD research focusing on means to gradually and seamlessly scale visual and auditory stimuli in extended reality, and investigations of the corresponding changes in human perception. I discuss future research topics and potential questions to be discussed at the ISMAR 2021 Doctoral Consortium. |
| Zubin Choudhary; Gerd Bruder; Gregory F. Welch Scaled User Embodied Representations in Virtual and Augmented Reality Proceedings Article In: Workshop on User-Embodied Interaction in Virtual Reality (UIVR) 2021, 2021. @inproceedings{Choudhary2021b,
title = {Scaled User Embodied Representations in Virtual and Augmented Reality},
author = {Zubin Choudhary and Gerd Bruder and Gregory F. Welch },
url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/UIVR21-Submission-Final-1.pdf},
year = {2021},
date = {2021-09-08},
urldate = {2021-09-08},
publisher = {Workshop on User-Embodied Interaction in Virtual Reality (UIVR) 2021},
abstract = {Embodied user representations are important for a wide range of application domains involving human social interactions. While traditionally, human appearances were defined by the physics of the real world, we now have the means to go beyond such limitations with virtual, mixed, and augmented reality (VR/MR/AR) technologies. Different human appearances can have an impact on their perception and behavior with other users in social or collaborative environments. There is a growing literature about the impact of different user representations and behaviors on perception; however, investigating the impact of visual scaling of human body parts has so far received less attention from the research community.
In this paper, we present and discuss our position that scaled user embodied representations in VR/MR/AR could lead to significant improvements for a range of use cases. We present our previous work on this topic, including the Big Head technique, through which virtual human heads can be scaled up or down. We motivate how it can improve the visibility of facial information, such as facial expressions and eye gaze, over long distances. Even when a human would be barely visible at a distance in the real world, this technique can recover lost embodied cues. We discuss perceptual effects of scaling human body parts and outline future research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Embodied user representations are important for a wide range of application domains involving human social interactions. While traditionally, human appearances were defined by the physics of the real world, we now have the means to go beyond such limitations with virtual, mixed, and augmented reality (VR/MR/AR) technologies. Different human appearances can have an impact on their perception and behavior with other users in social or collaborative environments. There is a growing literature about the impact of different user representations and behaviors on perception; however, investigating the impact of visual scaling of human body parts has so far received less attention from the research community.
In this paper, we present and discuss our position that scaled user embodied representations in VR/MR/AR could lead to significant improvements for a range of use cases. We present our previous work on this topic, including the Big Head technique, through which virtual human heads can be scaled up or down. We motivate how it can improve the visibility of facial information, such as facial expressions and eye gaze, over long distances. Even when a human would be barely visible at a distance in the real world, this technique can recover lost embodied cues. We discuss perceptual effects of scaling human body parts and outline future research. |
| Zubin Choudhary; Matt Gottsacker; Kangsoo Kim; Ryan Schubert; Jeanine Stefanucci; Gerd Bruder; Greg Welch Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality Proceedings Article In: IEEE Virtual Reality (VR), 2021, 2021. @inproceedings{Choudhary2021,
title = {Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality},
author = {Zubin Choudhary and Matt Gottsacker and Kangsoo Kim and Ryan Schubert and Jeanine Stefanucci and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/04/C2593-Revisiting-Distance-Perception-with-Scaled-Embodied-Cues-in-Social-Virtual-Reality-7.pdf},
year = {2021},
date = {2021-04-01},
publisher = {IEEE Virtual Reality (VR), 2021},
abstract = {Previous research on distance estimation in virtual reality (VR) has well established that even for geometrically accurate virtual objects and environments users tend to systematically misestimate distances. This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. One yet unexplored factor is related to the trend that avatars’ embodied cues in Social VR are often scaled, e.g., by making one’s head bigger or one’s voice louder, to make social cues more pronounced over longer distances.
In this paper we investigate how the perception of avatar distance is changed based on two means for scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between factor as well as three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. We discuss the interactions between the factors and implications for Social VR.
},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Previous research on distance estimation in virtual reality (VR) has well established that even for geometrically accurate virtual objects and environments users tend to systematically misestimate distances. This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. One yet unexplored factor is related to the trend that avatars’ embodied cues in Social VR are often scaled, e.g., by making one’s head bigger or one’s voice louder, to make social cues more pronounced over longer distances.
In this paper we investigate how the perception of avatar distance is changed based on two means for scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between factor as well as three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. We discuss the interactions between the factors and implications for Social VR. |
2020
|
| Nahal Norouzi; Kangsoo Kim; Gerd Bruder; Austin Erickson; Zubin Choudhary; Yifan Li; Greg Welch A Systematic Literature Review of Embodied Augmented Reality Agents in Head-Mounted Display Environments Proceedings Article In: In Proceedings of the International Conference on Artificial Reality and Telexistence & Eurographics Symposium on Virtual Environments, pp. 11, 2020. @inproceedings{Norouzi2020c,
title = {A Systematic Literature Review of Embodied Augmented Reality Agents in Head-Mounted Display Environments},
author = {Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Austin Erickson and Zubin Choudhary and Yifan Li and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2020/11/IVC_ICAT_EGVE2020.pdf
https://www.youtube.com/watch?v=IsX5q86pH4M},
year = {2020},
date = {2020-12-02},
urldate = {2020-12-02},
booktitle = {In Proceedings of the International Conference on Artificial Reality and Telexistence & Eurographics Symposium on Virtual Environments},
pages = {11},
abstract = {Embodied agents, i.e., computer-controlled characters, have proven useful for various applications across a multitude of display setups and modalities. While most traditional work focused on embodied agents presented on a screen or projector, and a growing number of works are focusing on agents in virtual reality, a comparatively small number of publications looked at such agents in augmented reality (AR). Such AR agents, specifically when using see-through head-mounted displays (HMDs) as the display medium, show multiple critical differences from other forms of agents, including their appearances, behaviors, and physical-virtual interactivity. Due to the unique challenges in this specific field, and due to the comparatively limited attention by the research community so far, we believe that it is important to map the field to understand the current trends, challenges, and future research. In this paper, we present a systematic review of the research performed on interactive, embodied AR agents using HMDs. Starting with 1261 broadly related papers, we conducted an in-depth review of 50 directly related papers from 2000 to 2020, focusing on papers that reported on user studies aiming to improve our understanding of interactive agents in AR HMD environments or their utilization in specific applications. We identified common research and application areas of AR agents through a structured iterative process, present research trends and gaps, and share insights on future directions.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Embodied agents, i.e., computer-controlled characters, have proven useful for various applications across a multitude of display setups and modalities. While most traditional work focused on embodied agents presented on a screen or projector, and a growing number of works are focusing on agents in virtual reality, a comparatively small number of publications looked at such agents in augmented reality (AR). Such AR agents, specifically when using see-through head-mounted displays (HMDs) as the display medium, show multiple critical differences from other forms of agents, including their appearances, behaviors, and physical-virtual interactivity. Due to the unique challenges in this specific field, and due to the comparatively limited attention by the research community so far, we believe that it is important to map the field to understand the current trends, challenges, and future research. In this paper, we present a systematic review of the research performed on interactive, embodied AR agents using HMDs. Starting with 1261 broadly related papers, we conducted an in-depth review of 50 directly related papers from 2000 to 2020, focusing on papers that reported on user studies aiming to improve our understanding of interactive agents in AR HMD environments or their utilization in specific applications. We identified common research and application areas of AR agents through a structured iterative process, present research trends and gaps, and share insights on future directions. |
| Zubin Choudhary; Kangsoo Kim; Ryan Schubert; Gerd Bruder; Gregory F. Welch Virtual Big Heads: Analysis of Human Perception and Comfort of Head Scales in Social Virtual Reality Proceedings Article In: Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR), pp. 425-433, Atlanta, Georgia, 2020. @inproceedings{Choudhary2020vbh,
title = {Virtual Big Heads: Analysis of Human Perception and Comfort of Head Scales in Social Virtual Reality},
author = {Zubin Choudhary and Kangsoo Kim and Ryan Schubert and Gerd Bruder and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2020/02/IEEEVR2020_BigHead.pdf
https://www.youtube.com/watch?v=14289nufYf0, YouTube Presentation},
doi = {10.1109/VR46266.2020.00-41},
year = {2020},
date = {2020-03-23},
booktitle = {Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR)},
pages = {425-433},
address = {Atlanta, Georgia},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|