2022
|
 | Jesus Ugarte; Nahal Norouzi; Austin Erickson; Gerd Bruder; Greg Welch
Distant Hand Interaction Framework in Augmented Reality Proceedings Article In: Proceedings of the 2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), pp. 962-963, IEEE, Christchurch, New Zealand, 2022. @inproceedings{Ugarte2022,
title = {Distant Hand Interaction Framework in Augmented Reality},
author = {Jesus Ugarte and Nahal Norouzi and Austin Erickson and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2022/05/Distant_Hand_Interaction_Framework_in_Augmented_Reality.pdf},
doi = {10.1109/VRW55335.2022.00332},
year = {2022},
date = {2022-03-16},
urldate = {2022-03-16},
booktitle = {Proceedings of the 2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {962-963},
publisher = {IEEE},
address = {Christchurch, New Zealand},
organization = {IEEE},
series = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)},
abstract = {Recent augmented reality (AR) head-mounted displays support shared experiences among multiple users in real physical spaces. While previous research has looked at different embodied methods to enhance interpersonal communication cues, so far less research has looked at distant interaction in AR and, in particular, distant hand communication, which can open up new possibilities for scenarios such as large-group collaboration. In this demonstration, we present a research framework for distant hand interaction in AR, including mapping techniques and visualizations. Our techniques are inspired by virtual reality (VR) distant hand interactions, but had to be adjusted due to the different context in AR and limited knowledge about the physical environment. We discuss different techniques for hand communication, including deictic pointing at a distance, distant drawing in AR, and distant communication through symbolic hand gestures.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Recent augmented reality (AR) head-mounted displays support shared experiences among multiple users in real physical spaces. While previous research has looked at different embodied methods to enhance interpersonal communication cues, so far less research has looked at distant interaction in AR and, in particular, distant hand communication, which can open up new possibilities for scenarios such as large-group collaboration. In this demonstration, we present a research framework for distant hand interaction in AR, including mapping techniques and visualizations. Our techniques are inspired by virtual reality (VR) distant hand interactions, but had to be adjusted due to the different context in AR and limited knowledge about the physical environment. We discuss different techniques for hand communication, including deictic pointing at a distance, distant drawing in AR, and distant communication through symbolic hand gestures. |
 | Nahal Norouzi; Matthew Gottsacker; Gerd Bruder; Pamela Wisniewski; Jeremy Bailenson; Greg Welch Virtual Humans with Pets and Robots: Exploring the Influence of Social Priming on One’s Perception of a Virtual Human Proceedings Article In: Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR), pp. 10, IEEE, Christchurch, New Zealand, 2022. @inproceedings{Norouzi2022,
title = {Virtual Humans with Pets and Robots: Exploring the Influence of Social Priming on One’s Perception of a Virtual Human},
author = {Nahal Norouzi and Matthew Gottsacker and Gerd Bruder and Pamela Wisniewski and Jeremy Bailenson and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2022/01/2022007720.pdf},
year = {2022},
date = {2022-03-16},
urldate = {2022-03-16},
booktitle = {Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR)},
pages = {10},
publisher = {IEEE},
address = {Christchurch, New Zealand},
abstract = {Social priming is the idea that observations of a virtual human (VH) engaged in short social interactions with a real or virtual human bystander can positively influence users’ subsequent interactions with that VH. In this paper we investigate the question of whether the positive effects of social priming are limited to interactions with humanoid entities. For instance, virtual dogs offer an attractive candidate for non-humanoid entities, as previous research suggests multiple positive effects. In particular, real human dog owners receive more positive attention from strangers than non-dog owners. To examine the influence of such social priming we carried out a human-subjects experiment with four conditions: three social priming conditions where a participant initially observed a VH interacting with one of three virtual entities (another VH, a virtual pet dog, or a virtual personal robot), and a non-social priming condition where a VH (alone) was intently looking at her phone as if reading something. We recruited 24 participants and conducted a mixed-methods analysis. We found that a VH’s prior social interactions with another VH and a virtual dog significantly increased participants’ perceptions of the VHs’ affective attraction. Also, participants felt more inclined to interact with the VH in the future in all of the social priming conditions. Qualitatively, we found that the social priming conditions resulted in a more positive user experience than the non-social priming condition. Also, the virtual dog and the virtual robot were perceived as a source of positive surprise, with participants appreciating the non-humanoid interactions for various reasons, such as the avoidance of social anxieties sometimes associated with humans.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Social priming is the idea that observations of a virtual human (VH) engaged in short social interactions with a real or virtual human bystander can positively influence users’ subsequent interactions with that VH. In this paper we investigate the question of whether the positive effects of social priming are limited to interactions with humanoid entities. For instance, virtual dogs offer an attractive candidate for non-humanoid entities, as previous research suggests multiple positive effects. In particular, real human dog owners receive more positive attention from strangers than non-dog owners. To examine the influence of such social priming we carried out a human-subjects experiment with four conditions: three social priming conditions where a participant initially observed a VH interacting with one of three virtual entities (another VH, a virtual pet dog, or a virtual personal robot), and a non-social priming condition where a VH (alone) was intently looking at her phone as if reading something. We recruited 24 participants and conducted a mixed-methods analysis. We found that a VH’s prior social interactions with another VH and a virtual dog significantly increased participants’ perceptions of the VHs’ affective attraction. Also, participants felt more inclined to interact with the VH in the future in all of the social priming conditions. Qualitatively, we found that the social priming conditions resulted in a more positive user experience than the non-social priming condition. Also, the virtual dog and the virtual robot were perceived as a source of positive surprise, with participants appreciating the non-humanoid interactions for various reasons, such as the avoidance of social anxieties sometimes associated with humans. |
 | Tabitha C. Peck; Jessica J. Good; Austin Erickson; Isaac Bynum; Gerd Bruder Effects of Transparency on Perceived Humanness: Implications for Rendering Skin Tones Using Optical See-Through Displays Journal Article In: IEEE Transactions on Visualization and Computer Graphics (TVCG), no. 01, pp. 1-11, 2022, ISSN: 1941-0506. @article{nokey,
title = {Effects of Transparency on Perceived Humanness: Implications for Rendering Skin Tones Using Optical See-Through Displays},
author = {Tabitha C. Peck and Jessica J. Good and Austin Erickson and Isaac Bynum and Gerd Bruder},
url = {https://sreal.ucf.edu/wp-content/uploads/2022/02/AR_and_Avatar_Transparency.pdf
https://www.youtube.com/watch?v=0tUlhbxhE6U&t=59s},
doi = {10.1109/TVCG.2022.3150521},
issn = {1941-0506},
year = {2022},
date = {2022-03-15},
urldate = {2022-03-15},
journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)},
number = {01},
pages = {1-11},
abstract = {Current optical see-through displays in the field of augmented reality are limited in their ability to display colors with low lightness in the hue, saturation, lightness (HSL) color space, causing such colors to appear transparent. This hardware limitation may add unintended bias into scenarios with virtual humans. Humans have varying skin tones including HSL colors with low lightness. When virtual humans are displayed with optical see-through devices, people with low lightness skin tones may be displayed semi-transparently while those with high lightness skin tones will be displayed more opaquely. For example, a Black avatar may appear semi-transparent in the same scene as a White avatar who will appear more opaque. We present an exploratory user study (N = 160) investigating whether differing opacity levels result in dehumanizing avatar and human faces. Results support that dehumanization occurs as opacity decreases. This suggests that in similar lighting, low lightness skin tones (e.g., Black faces) will be viewed as less human than high lightness skin tones (e.g., White faces). Additionally, the perceived emotionality of virtual human faces also predicts perceived humanness. Angry faces were seen overall as less human, and at lower opacity levels happy faces were seen as more human. Our results suggest that additional research is needed to understand the effects and interactions of emotionality and opacity on dehumanization. Further, we provide evidence that unintentional racial bias may be added when developing for optical see-through devices using virtual humans. We highlight the potential bias and discuss implications and directions for future research.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Current optical see-through displays in the field of augmented reality are limited in their ability to display colors with low lightness in the hue, saturation, lightness (HSL) color space, causing such colors to appear transparent. This hardware limitation may add unintended bias into scenarios with virtual humans. Humans have varying skin tones including HSL colors with low lightness. When virtual humans are displayed with optical see-through devices, people with low lightness skin tones may be displayed semi-transparently while those with high lightness skin tones will be displayed more opaquely. For example, a Black avatar may appear semi-transparent in the same scene as a White avatar who will appear more opaque. We present an exploratory user study (N = 160) investigating whether differing opacity levels result in dehumanizing avatar and human faces. Results support that dehumanization occurs as opacity decreases. This suggests that in similar lighting, low lightness skin tones (e.g., Black faces) will be viewed as less human than high lightness skin tones (e.g., White faces). Additionally, the perceived emotionality of virtual human faces also predicts perceived humanness. Angry faces were seen overall as less human, and at lower opacity levels happy faces were seen as more human. Our results suggest that additional research is needed to understand the effects and interactions of emotionality and opacity on dehumanization. Further, we provide evidence that unintentional racial bias may be added when developing for optical see-through devices using virtual humans. We highlight the potential bias and discuss implications and directions for future research. |
 | Isaac Bynum; Jessica J. Good; Gerd Bruder; Austin Erickson; Tabitha C. Peck The Effects of Transparency on Dehumanization of Black Avatars in Augmented Reality Proceedings Article In: Proceedings of the Annual Conference of the Society for Personality and Social Psychology, Society for Personality and Social Psychology, San Francisco, CA, 2022. @inproceedings{Bynum2022,
title = {The Effects of Transparency on Dehumanization of Black Avatars in Augmented Reality},
author = {Isaac Bynum and Jessica J. Good and Gerd Bruder and Austin Erickson and Tabitha C. Peck},
url = {https://sreal.ucf.edu/wp-content/uploads/2022/07/spspPoster.pdf},
year = {2022},
date = {2022-02-16},
urldate = {2022-02-16},
booktitle = {Proceedings of the Annual Conference of the Society for Personality and Social Psychology},
address = {San Francisco, CA},
organization = {Society for Personality and Social Psychology},
series = {Annual Conference of the Society for Personality and Social Psychology},
howpublished = {Poster at Annual Conference of the Society for Personality and Social Psychology 2022},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
|
2021
|
 | Connor D. Flick; Courtney J. Harris; Nikolas T. Yonkers; Nahal Norouzi; Austin Erickson; Zubin Choudhary; Matt Gottsacker; Gerd Bruder; Gregory F. Welch Trade-offs in Augmented Reality User Interfaces for Controlling a Smart Environment Proceedings Article In: Symposium on Spatial User Interaction (SUI '21), pp. 1-11, Association for Computing Machinery, New York, NY, USA, 2021. @inproceedings{Flick2021,
title = {Trade-offs in Augmented Reality User Interfaces for Controlling a Smart Environment},
author = {Connor D. Flick and Courtney J. Harris and Nikolas T. Yonkers and Nahal Norouzi and Austin Erickson and Zubin Choudhary and Matt Gottsacker and Gerd Bruder and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/09/SUI2021_REU_Paper.pdf},
year = {2021},
date = {2021-11-09},
urldate = {2021-11-09},
booktitle = {Symposium on Spatial User Interaction (SUI '21)},
pages = {1-11},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
abstract = {Smart devices and Internet of Things (IoT) technologies are replacing or being incorporated into traditional devices at a growing pace. The use of digital interfaces to interact with these devices has become a common occurrence in homes, work spaces, and various industries around the world. The most common interfaces for these connected devices focus on mobile apps or voice control via intelligent virtual assistants. However, with augmented reality (AR) becoming more popular and accessible among consumers, there are new opportunities for spatial user interfaces to seamlessly bridge the gap between digital and physical affordances.
In this paper, we present a human-subject study evaluating and comparing four user interfaces for smart connected environments: gaze input, hand gestures, voice input, and a mobile app. We assessed participants’ user experience, usability, task load, completion time, and preferences. Our results show multiple trade-offs between these interfaces across these measures. In particular, we found that gaze input shows great potential for future use cases, while both gaze input and hand gestures suffer from limited familiarity among users, compared to voice input and mobile apps.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Smart devices and Internet of Things (IoT) technologies are replacing or being incorporated into traditional devices at a growing pace. The use of digital interfaces to interact with these devices has become a common occurrence in homes, work spaces, and various industries around the world. The most common interfaces for these connected devices focus on mobile apps or voice control via intelligent virtual assistants. However, with augmented reality (AR) becoming more popular and accessible among consumers, there are new opportunities for spatial user interfaces to seamlessly bridge the gap between digital and physical affordances.
In this paper, we present a human-subject study evaluating and comparing four user interfaces for smart connected environments: gaze input, hand gestures, voice input, and a mobile app. We assessed participants’ user experience, usability, task load, completion time, and preferences. Our results show multiple trade-offs between these interfaces across these measures. In particular, we found that gaze input shows great potential for future use cases, while both gaze input and hand gestures suffer from limited familiarity among users, compared to voice input and mobile apps. |
 | Zubin Choudhary; Jesus Ugarte; Gerd Bruder; Greg Welch Real-Time Magnification in Augmented Reality Conference Proceedings of the 2021 ACM Symposium on Spatial User Interaction, SUI 2021, ACM, 2021. @conference{Choudhary2021d,
title = {Real-Time Magnification in Augmented Reality},
author = {Zubin Choudhary and Jesus Ugarte and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/09/SUI2021_AR_Magnification_DEMO.pdf},
year = {2021},
date = {2021-11-09},
urldate = {2021-11-09},
booktitle = {Proceedings of the 2021 ACM Symposium on Spatial User Interaction},
pages = {1-2},
organization = {ACM},
series = {SUI 2021},
abstract = {With recent advances in augmented reality (AR) and computer vision it has become possible to magnify objects in real time in a user’s field of view. AR object magnification can have different purposes, such as enhancing human visual capabilities with the BigHead technique, which works by up-scaling human heads to communicate important facial cues over longer distances. For this purpose, we created a prototype with a 4K camera mounted on a HoloLens 2. In this demo, we present the BigHead technique and a proof-of-concept AR testbed to magnify heads in real time. Further, we describe how hand gestures are detected to control the scale and position of the magnified head. We discuss the technique and implementation, and propose future research directions.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
With recent advances in augmented reality (AR) and computer vision it has become possible to magnify objects in real time in a user’s field of view. AR object magnification can have different purposes, such as enhancing human visual capabilities with the BigHead technique, which works by up-scaling human heads to communicate important facial cues over longer distances. For this purpose, we created a prototype with a 4K camera mounted on a HoloLens 2. In this demo, we present the BigHead technique and a proof-of-concept AR testbed to magnify heads in real time. Further, we describe how hand gestures are detected to control the scale and position of the magnified head. We discuss the technique and implementation, and propose future research directions. |
 | Matt Gottsacker; Nahal Norouzi; Kangsoo Kim; Gerd Bruder; Gregory F. Welch Diegetic Representations for Seamless Cross-Reality Interruptions Conference Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), 2021. @conference{Gottsacker2021,
title = {Diegetic Representations for Seamless Cross-Reality Interruptions},
author = {Matt Gottsacker and Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/ISMAR_2021_Paper__Interruptions_.pdf},
year = {2021},
date = {2021-10-15},
urldate = {2021-10-15},
booktitle = {Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR)},
pages = {10},
abstract = {Due to the closed design of modern virtual reality (VR) head-mounted displays (HMDs), users tend to lose awareness of their real-world surroundings. This is particularly challenging when another person in the same physical space needs to interrupt the VR user for a brief conversation. Such interruptions, e.g., tapping a VR user on the shoulder, can cause a disruptive break in presence (BIP), which affects their place and plausibility illusions, and may cause a drop in performance of their virtual activity. Recent findings related to the concept of diegesis, which denotes the internal consistency of an experience/story, suggest potential benefits of integrating registered virtual representations for physical interactors, especially when these appear internally consistent in VR. In this paper, we present a human-subject study we conducted to compare and evaluate five different diegetic and non-diegetic methods to facilitate cross-reality interruptions in a virtual office environment, where a user’s task was briefly interrupted by a physical person. We created a Cross-Reality Interaction Questionnaire (CRIQ) to capture the quality of the interaction from the VR user’s perspective. Our results show that the diegetic representations afforded the highest quality interactions, the highest place illusions, and caused the least disruption of the participants’ virtual experiences. We found reasonably high senses of co-presence with the partially and fully diegetic virtual representations. We discuss our findings as well as implications for practical applications that aim to leverage virtual representations to ease cross-reality interruptions.},
keywords = {},
pubstate = {published},
tppubtype = {conference}
}
Due to the closed design of modern virtual reality (VR) head-mounted displays (HMDs), users tend to lose awareness of their real-world surroundings. This is particularly challenging when another person in the same physical space needs to interrupt the VR user for a brief conversation. Such interruptions, e.g., tapping a VR user on the shoulder, can cause a disruptive break in presence (BIP), which affects their place and plausibility illusions, and may cause a drop in performance of their virtual activity. Recent findings related to the concept of diegesis, which denotes the internal consistency of an experience/story, suggest potential benefits of integrating registered virtual representations for physical interactors, especially when these appear internally consistent in VR. In this paper, we present a human-subject study we conducted to compare and evaluate five different diegetic and non-diegetic methods to facilitate cross-reality interruptions in a virtual office environment, where a user’s task was briefly interrupted by a physical person. We created a Cross-Reality Interaction Questionnaire (CRIQ) to capture the quality of the interaction from the VR user’s perspective. Our results show that the diegetic representations afforded the highest quality interactions, the highest place illusions, and caused the least disruption of the participants’ virtual experiences. We found reasonably high senses of co-presence with the partially and fully diegetic virtual representations. We discuss our findings as well as implications for practical applications that aim to leverage virtual representations to ease cross-reality interruptions. |
 | Nahal Norouzi; Gerd Bruder; Austin Erickson; Kangsoo Kim; Jeremy Bailenson; Pamela J. Wisniewski; Charles E. Hughes; Gregory F. Welch Virtual Animals as Diegetic Attention Guidance Mechanisms in 360-Degree Experiences Journal Article In: IEEE Transactions on Visualization and Computer Graphics (TVCG) Special Issue on ISMAR 2021, pp. 11, 2021. @article{Norouzi2021,
title = {Virtual Animals as Diegetic Attention Guidance Mechanisms in 360-Degree Experiences},
author = {Nahal Norouzi and Gerd Bruder and Austin Erickson and Kangsoo Kim and Jeremy Bailenson and Pamela J. Wisniewski and Charles E. Hughes and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/IEEE_ISMAR_TVCG_2021.pdf},
year = {2021},
date = {2021-10-15},
urldate = {2021-10-15},
journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG) Special Issue on ISMAR 2021},
pages = {11},
abstract = {360-degree experiences such as cinematic virtual reality and 360-degree videos are becoming increasingly popular. In most examples, viewers can freely explore the content by changing their orientation. However, in some cases, this increased freedom may lead to viewers missing important events within such experiences. Thus, a recent research thrust has focused on studying mechanisms for guiding viewers’ attention while maintaining their sense of presence and fostering a positive user experience. One approach is the utilization of diegetic mechanisms, characterized by an internal consistency with respect to the narrative and the environment, for attention guidance. While such mechanisms are highly attractive, their uses and potential implementations are still not well understood. Additionally, acknowledging the user in 360-degree experiences has been linked to a higher sense of presence and connection. However, less is known when acknowledging behaviors are carried out by attention guiding mechanisms. To close these gaps, we conducted a within-subjects user study with five conditions of no guide and virtual arrows, birds, dogs, and dogs that acknowledge the user and the environment. Through our mixed-methods analysis, we found that the diegetic virtual animals resulted in a more positive user experience, all of which were at least as effective as the non-diegetic arrow in guiding users towards target events. The acknowledging dog received the most positive responses from our participants in terms of preference and user experience and significantly improved their sense of presence compared to the non-diegetic arrow. Lastly, three themes emerged from a qualitative analysis of our participants’ feedback, indicating the importance of the guide’s blending in, its acknowledging behavior, and participants’ positive associations as the main factors for our participants’ preferences.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
360-degree experiences such as cinematic virtual reality and 360-degree videos are becoming increasingly popular. In most examples, viewers can freely explore the content by changing their orientation. However, in some cases, this increased freedom may lead to viewers missing important events within such experiences. Thus, a recent research thrust has focused on studying mechanisms for guiding viewers’ attention while maintaining their sense of presence and fostering a positive user experience. One approach is the utilization of diegetic mechanisms, characterized by an internal consistency with respect to the narrative and the environment, for attention guidance. While such mechanisms are highly attractive, their uses and potential implementations are still not well understood. Additionally, acknowledging the user in 360-degree experiences has been linked to a higher sense of presence and connection. However, less is known when acknowledging behaviors are carried out by attention guiding mechanisms. To close these gaps, we conducted a within-subjects user study with five conditions of no guide and virtual arrows, birds, dogs, and dogs that acknowledge the user and the environment. Through our mixed-methods analysis, we found that the diegetic virtual animals resulted in a more positive user experience, all of which were at least as effective as the non-diegetic arrow in guiding users towards target events. The acknowledging dog received the most positive responses from our participants in terms of preference and user experience and significantly improved their sense of presence compared to the non-diegetic arrow. Lastly, three themes emerged from a qualitative analysis of our participants’ feedback, indicating the importance of the guide’s blending in, its acknowledging behavior, and participants’ positive associations as the main factors for our participants’ preferences. |
![[DC] Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality](https://sreal.ucf.edu/wp-content/uploads/2021/08/teaser3-300x156.png) | Zubin Choudhary [DC] Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality Proceedings Article In: pp. 4, IEEE, 2021. @inproceedings{Choudhary2021bb,
title = {[DC] Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality},
author = {Zubin Choudhary},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/ISMAR_DC_2021-ver2.pdf},
year = {2021},
date = {2021-10-08},
urldate = {2021-10-08},
pages = {4},
organization = {IEEE},
series = {ISMAR 2021},
abstract = {Existing literature in the field of extended reality has demonstrated that visual/auditory manipulations can change a person’s perception and behavior. For example, a mismatch between the physical self and the virtual self can have psychological and behavioral implications. There are many different approaches that can incur a perceptual change. An under-explored field of research is gradual and subtle manipulations, such as scaling the food one eats or scaling the heads of people one sees. In this position paper, I provide an overview of my prior PhD research focusing on means to gradually and seamlessly scale visual and auditory stimuli in extended reality, and investigations of the corresponding changes in human perception. I discuss future research topics and potential questions to be discussed at the ISMAR 2021 Doctoral Consortium.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Existing literature in the field of extended reality has demonstrated that visual/auditory manipulations can change a person’s perception and behavior. For example, a mismatch between the physical self and the virtual self can have psychological and behavioral implications. There are many different approaches that can incur a perceptual change. An under-explored field of research is gradual and subtle manipulations, such as scaling the food one eats or scaling the heads of people one sees. In this position paper, I provide an overview of my prior PhD research focusing on means to gradually and seamlessly scale visual and auditory stimuli in extended reality, and investigations of the corresponding changes in human perception. I discuss future research topics and potential questions to be discussed at the ISMAR 2021 Doctoral Consortium. |
 | Austin Erickson; Dirk Reiners; Gerd Bruder; Greg Welch Augmenting Human Perception: Mediation of Extrasensory Signals in Head-Worn Augmented Reality Proceedings Article In: Proceedings of the 2021 International Symposium on Mixed and Augmented Reality, pp. 373-377, IEEE, 2021. @inproceedings{Erickson2021b,
title = {Augmenting Human Perception: Mediation of Extrasensory Signals in Head-Worn Augmented Reality},
author = {Austin Erickson and Dirk Reiners and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/ismar21d-sub1093-i6.pdf},
doi = {10.1109/ISMAR-Adjunct54149.2021.00085},
year = {2021},
date = {2021-10-04},
urldate = {2021-10-04},
booktitle = {Proceedings of the 2021 International Symposium on Mixed and Augmented Reality},
pages = {373-377},
publisher = {IEEE},
organization = {IEEE},
series = {ISMAR 2021},
abstract = {Mediated perception systems are systems in which sensory signals from the user's environment are mediated to the user's sensory channels. This type of system has great potential for enhancing the perception of the user via augmenting and/or diminishing incoming sensory signals according to the user's context, preferences, and perceptual capability. They also allow for extending the perception of the user to enable them to sense signals typically imperceivable to human senses, such as regions of the electromagnetic spectrum beyond visible light. However, in order to effectively mediate extrasensory data to the user, we need to understand when and how such data should be presented to them.
In this paper, we present a prototype mediated perception system that maps extrasensory spatial data into visible light displayed within an augmented reality (AR) optical see-through head-mounted display (OST-HMD). Although the system is generalized such that it could support any spatial sensor data with minor modification, we chose to test the system using thermal infrared sensors. This system improves upon previous extended perception augmented reality prototypes in that it is capable of projecting egocentric sensor data in real time onto a 3D mesh generated by the OST-HMD that is representative of the user's environment. We present the lessons learned through iterative improvements to the system, as well as a performance analysis of the system and recommendations for future work.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Mediated perception systems are systems in which sensory signals from the user's environment are mediated to the user's sensory channels. This type of system has great potential for enhancing the perception of the user via augmenting and/or diminishing incoming sensory signals according to the user's context, preferences, and perceptual capability. They also allow for extending the perception of the user to enable them to sense signals typically imperceivable to human senses, such as regions of the electromagnetic spectrum beyond visible light. However, in order to effectively mediate extrasensory data to the user, we need to understand when and how such data should be presented to them.
In this paper, we present a prototype mediated perception system that maps extrasensory spatial data into visible light displayed within an augmented reality (AR) optical see-through head-mounted display (OST-HMD). Although the system is generalized such that it could support any spatial sensor data with minor modification, we chose to test the system using thermal infrared sensors. This system improves upon previous extended perception augmented reality prototypes in that it is capable of projecting egocentric sensor data in real time onto a 3D mesh generated by the OST-HMD that is representative of the user's environment. We present the lessons learned through iterative improvements to the system, as well as a performance analysis of the system and recommendations for future work. |
 | Zubin Choudhary; Gerd Bruder; Gregory F. Welch Scaled User Embodied Representations in Virtual and Augmented Reality Proceedings Article In: Workshop on User-Embodied Interaction in Virtual Reality (UIVR) 2021, 2021. @inproceedings{Choudhary2021b,
title = {Scaled User Embodied Representations in Virtual and Augmented Reality},
author = {Zubin Choudhary and Gerd Bruder and Gregory F. Welch },
url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/UIVR21-Submission-Final-1.pdf},
year = {2021},
date = {2021-09-08},
urldate = {2021-09-08},
publisher = {Workshop on User-Embodied Interaction in Virtual Reality (UIVR) 2021},
abstract = {Embodied user representations are important for a wide range of application domains involving human social interactions. While traditionally, human appearances were defined by the physics of the real world, we now have the means to go beyond such limitations with virtual, mixed, and augmented reality (VR/MR/AR) technologies. Different human appearances can have an impact on their perception and behavior with other users in social or collaborative environments. There is a growing literature about the impact of different user representations and behaviors on perception; however, investigating the impact of visual scaling of human body parts has so far received less attention from the research community.
In this paper, we present and discuss our position that scaled user embodied representations in VR/MR/AR could lead to significant improvements for a range of use cases. We present our previous work on this topic, including the Big Head technique, through which virtual human heads can be scaled up or down. We motivate how it can improve the visibility of facial information, such as facial expressions and eye gaze, over long distances. Even when a human would be barely visible at a distance in the real world, this technique can recover lost embodied cues. We discuss perceptual effects of scaling human body parts and outline future research.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Embodied user representations are important for a wide range of application domains involving human social interactions. While traditionally, human appearances were defined by the physics of the real world, we now have the means to go beyond such limitations with virtual, mixed, and augmented reality (VR/MR/AR) technologies. Different human appearances can have an impact on their perception and behavior with other users in social or collaborative environments. There is a growing literature about the impact of different user representations and behaviors on perception; however, investigating the impact of visual scaling of human body parts has so far received less attention from the research community.
In this paper, we present and discuss our position that scaled user embodied representations in VR/MR/AR could lead to significant improvements for a range of use cases. We present our previous work on this topic, including the Big Head technique, through which virtual human heads can be scaled up or down. We motivate how it can improve the visibility of facial information, such as facial expressions and eye gaze, over long distances. Even when a human would be barely visible at a distance in the real world, this technique can recover lost embodied cues. We discuss perceptual effects of scaling human body parts and outline future research. |
 | Kangsoo Kim; Nahal Norouzi; Dongsik Jo; Gerd Bruder; Gregory F. Welch The Augmented Reality Internet of Things: Opportunities of Embodied Interactions in Transreality Book Chapter Forthcoming In: Nee, A. Y. C.; Ong, S. K. (Ed.): vol. Handbook of Augmented Reality, pp. 60, Springer, Forthcoming. @inbook{Kim2021,
title = {The Augmented Reality Internet of Things: Opportunities of Embodied Interactions in Transreality},
author = {Kangsoo Kim and Nahal Norouzi and Dongsik Jo and Gerd Bruder and Gregory F. Welch},
editor = {A. Y. C. Nee and S. K. Ong},
year = {2021},
date = {2021-09-01},
urldate = {2021-09-01},
volume = {Handbook of Augmented Reality},
pages = {60},
publisher = {Springer},
abstract = {Human society is encountering a new wave of advancements related to smart connected technologies with the convergence of different traditionally separate fields, which can be characterized by a fusion of technologies that merge and tightly integrate the physical, digital, and biological spheres. In this new paradigm of convergence, all the physical and digital things will become more and more intelligent and connected to each other through the Internet, and the boundary between them will blur and become seamless. In particular, Augmented/Mixed Reality (AR/MR) combines virtual content with the real environment and is experiencing an unprecedented golden era along with dramatic technological achievements and increasing public interest. Together with advanced Artificial Intelligence (AI) and ubiquitous computing empowered by the Internet of Things/Everything (IoT/IoE) systems, AR can be our ultimate interface to interact with both digital (virtual) and physical (real) worlds while pervasively mediating and enriching our lives. In this chapter, we describe the concept of transreality that symbiotically connects the physical and the virtual worlds incorporating the aforementioned advanced technologies, and illustrate how such transreality environments can transform our activities in it, providing intelligent and intuitive interaction with the environment while exploring prior research literature in this domain. We also present the potential of virtually embodied interactions—e.g., employing virtual avatars and agents—in highly connected transreality spaces for enhancing human abilities and perception. Recent ongoing research focusing on the effects of embodied interaction are described and discussed in different aspects such as perceptual, cognitive, and social contexts. The chapter will end with discussions on potential research directions in the future and implications related to the user experience in transreality.},
keywords = {},
pubstate = {forthcoming},
tppubtype = {inbook}
}
Human society is encountering a new wave of advancements related to smart connected technologies with the convergence of different traditionally separate fields, which can be characterized by a fusion of technologies that merge and tightly integrate the physical, digital, and biological spheres. In this new paradigm of convergence, all the physical and digital things will become more and more intelligent and connected to each other through the Internet, and the boundary between them will blur and become seamless. In particular, Augmented/Mixed Reality (AR/MR) combines virtual content with the real environment and is experiencing an unprecedented golden era along with dramatic technological achievements and increasing public interest. Together with advanced Artificial Intelligence (AI) and ubiquitous computing empowered by the Internet of Things/Everything (IoT/IoE) systems, AR can be our ultimate interface to interact with both digital (virtual) and physical (real) worlds while pervasively mediating and enriching our lives. In this chapter, we describe the concept of transreality that symbiotically connects the physical and the virtual worlds incorporating the aforementioned advanced technologies, and illustrate how such transreality environments can transform our activities in it, providing intelligent and intuitive interaction with the environment while exploring prior research literature in this domain. We also present the potential of virtually embodied interactions—e.g., employing virtual avatars and agents—in highly connected transreality spaces for enhancing human abilities and perception. Recent ongoing research focusing on the effects of embodied interaction are described and discussed in different aspects such as perceptual, cognitive, and social contexts. The chapter will end with discussions on potential research directions in the future and implications related to the user experience in transreality. |
 | Austin Erickson; Kangsoo Kim; Gerd Bruder; Gregory F. Welch Beyond Visible Light: User and Societal Impacts of Egocentric Multispectral Vision Proceedings Article In: Chen, J. Y. C.; Fragomeni, G. (Ed.): Proceedings of the 2021 International Conference on Virtual, Augmented, and Mixed Reality, pp. 19, Springer Nature, Washington, D.C., 2021. @inproceedings{Erickson2020fb,
title = {Beyond Visible Light: User and Societal Impacts of Egocentric Multispectral Vision},
author = {Austin Erickson and Kangsoo Kim and Gerd Bruder and Gregory F. Welch},
editor = {J. Y. C. Chen and G. Fragomeni},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/03/VAMR21-MSV.pdf},
doi = {10.1007/978-3-030-77599-5_23},
year = {2021},
date = {2021-07-24},
booktitle = {Proceedings of the 2021 International Conference on Virtual, Augmented, and Mixed Reality},
number = {23},
pages = {19},
publisher = {Springer Nature},
address = {Washington, D.C.},
abstract = {Multi-spectral imagery is becoming popular for a wide range of application fields from agriculture to healthcare, mainly stemming from advances in consumer sensor and display technologies. Modern augmented reality (AR) head-mounted displays already combine a multitude of sensors and are well-suited for integration with additional sensors, such as cameras capturing information from different parts of the electromagnetic spectrum. In this paper, we describe a novel multi-spectral vision prototype based on the Microsoft HoloLens 1, which we extended with two thermal infrared (IR) cameras and two ultraviolet (UV) cameras. We performed an exploratory experiment, in which participants wore the prototype for an extended period of time and assessed its potential to augment our daily activities. Our report covers a discussion of qualitative insights on personal and societal uses of such novel multi-spectral vision systems, including their applicability for use during the COVID-19 pandemic.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Multi-spectral imagery is becoming popular for a wide range of application fields from agriculture to healthcare, mainly stemming from advances in consumer sensor and display technologies. Modern augmented reality (AR) head-mounted displays already combine a multitude of sensors and are well-suited for integration with additional sensors, such as cameras capturing information from different parts of the electromagnetic spectrum. In this paper, we describe a novel multi-spectral vision prototype based on the Microsoft HoloLens 1, which we extended with two thermal infrared (IR) cameras and two ultraviolet (UV) cameras. We performed an exploratory experiment, in which participants wore the prototype for an extended period of time and assessed its potential to augment our daily activities. Our report covers a discussion of qualitative insights on personal and societal uses of such novel multi-spectral vision systems, including their applicability for use during the COVID-19 pandemic. |
 | Ryan Schubert; Gerd Bruder; Alyssa Tanaka; Francisco Guido-Sanz; Gregory F. Welch Mixed Reality Technology Capabilities for Combat-Casualty Handoff Training Proceedings Article In: Chen, Jessie Y. C.; Fragomeni, Gino (Ed.): International Conference on Human-Computer Interaction, pp. 695-711, Springer International Publishing, Cham, 2021, ISBN: 978-3-030-77599-5. @inproceedings{Schubert2021mixed,
title = {Mixed Reality Technology Capabilities for Combat-Casualty Handoff Training},
author = {Ryan Schubert and Gerd Bruder and Alyssa Tanaka and Francisco Guido-Sanz and Gregory F. Welch},
editor = {Jessie Y. C. Chen and Gino Fragomeni},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/07/Schubert2021_MixedRealityTechnologyCapabiliesForCombatCasualtyHandoffTraining-2.pdf},
doi = {10.1007/978-3-030-77599-5_47},
isbn = {978-3-030-77599-5},
year = {2021},
date = {2021-07-03},
booktitle = {International Conference on Human-Computer Interaction},
volume = {12770},
pages = {695-711},
publisher = {Springer International Publishing},
address = {Cham},
abstract = {Patient handoffs are a common, yet frequently error-prone occurrence, particularly in complex or challenging battlefield situations. Specific protocols exist to help simplify and reinforce conveying of necessary information during a combat-casualty handoff, and training can both reinforce correct behavior and protocol usage while providing relatively safe initial exposure to many of the complexities and variabilities of real handoff situations, before a patient’s life is at stake. Here we discuss a variety of mixed reality capabilities and training contexts that can manipulate many of these handoff complexities in a controlled manner. We finally discuss some future human-subject user study design considerations, including aspects of handoff training, evaluation or improvement of a specific handoff protocol, and how the same technology could be leveraged for operational use.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Patient handoffs are a common, yet frequently error-prone occurrence, particularly in complex or challenging battlefield situations. Specific protocols exist to help simplify and reinforce conveying of necessary information during a combat-casualty handoff, and training can both reinforce correct behavior and protocol usage while providing relatively safe initial exposure to many of the complexities and variabilities of real handoff situations, before a patient’s life is at stake. Here we discuss a variety of mixed reality capabilities and training contexts that can manipulate many of these handoff complexities in a controlled manner. We finally discuss some future human-subject user study design considerations, including aspects of handoff training, evaluation or improvement of a specific handoff protocol, and how the same technology could be leveraged for operational use. |
 | Mindi Anderson; Frank Guido-Sanz; Desiree A. Díaz; Gregory F. Welch; Laura Gonzalez Poster: Using XR Technology to Innovate Healthcare Education Presentation 20.06.2021. @misc{Anderson2021b,
title = {Poster: Using XR Technology to Innovate Healthcare Education},
author = {Mindi Anderson and Frank Guido-Sanz and Desiree A. Díaz and Gregory F. Welch and Laura Gonzalez},
url = {https://www.inacsl.org/education/future-conferences/
https://sreal.ucf.edu/wp-content/uploads/2021/11/1559212-1621968939.pdf},
year = {2021},
date = {2021-06-20},
urldate = {2021-06-20},
keywords = {},
pubstate = {published},
tppubtype = {presentation}
}
|
 | Austin Erickson; Kangsoo Kim; Alexis Lambert; Gerd Bruder; Michael P. Browne; Greg Welch An Extended Analysis on the Benefits of Dark Mode User Interfaces in Optical See-Through Head-Mounted Displays Journal Article In: ACM Transactions on Applied Perception, vol. 18, no. 3, pp. 22, 2021. @article{Erickson2021,
title = {An Extended Analysis on the Benefits of Dark Mode User Interfaces in Optical See-Through Head-Mounted Displays},
author = {Austin Erickson and Kangsoo Kim and Alexis Lambert and Gerd Bruder and Michael P. Browne and Greg Welch},
editor = {Victoria Interrante and Martin Giese},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/03/ACM_TAP2020_DarkMode1_5.pdf},
doi = {10.1145/3456874},
year = {2021},
date = {2021-05-20},
journal = {ACM Transactions on Applied Perception},
volume = {18},
number = {3},
pages = {22},
abstract = {Light-on-dark color schemes, so-called “Dark Mode,” are becoming more and more popular over a wide range of display technologies and application fields. Many people who have to look at computer screens for hours at a time, such as computer programmers and computer graphics artists, indicate a preference for switching colors on a computer screen from dark text on a light background to light text on a dark background due to perceived advantages related to visual comfort and acuity, specifically when working in low-light environments.
In this paper, we investigate the effects of dark mode color schemes in the field of optical see-through head-mounted displays (OST-HMDs), where the characteristic “additive” light model implies that bright graphics are visible but dark graphics are transparent. We describe two human-subject studies in which we evaluated a normal and inverted color mode in front of different physical backgrounds and different lighting conditions. Our results indicate that dark mode graphics displayed on the HoloLens have significant benefits for visual acuity and usability, while user preferences depend largely on the lighting in the physical environment. We discuss the implications of these effects on user interfaces and applications.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Light-on-dark color schemes, so-called “Dark Mode,” are becoming more and more popular over a wide range of display technologies and application fields. Many people who have to look at computer screens for hours at a time, such as computer programmers and computer graphics artists, indicate a preference for switching colors on a computer screen from dark text on a light background to light text on a dark background due to perceived advantages related to visual comfort and acuity, specifically when working in low-light environments.
In this paper, we investigate the effects of dark mode color schemes in the field of optical see-through head-mounted displays (OST-HMDs), where the characteristic “additive” light model implies that bright graphics are visible but dark graphics are transparent. We describe two human-subject studies in which we evaluated a normal and inverted color mode in front of different physical backgrounds and different lighting conditions. Our results indicate that dark mode graphics displayed on the HoloLens have significant benefits for visual acuity and usability, while user preferences depend largely on the lighting in the physical environment. We discuss the implications of these effects on user interfaces and applications. |
 | Hiroshi Furuya; Kangsoo Kim; Gerd Bruder; Pamela J. Wisniewski; Gregory F. Welch Autonomous Vehicle Visual Embodiment for Pedestrian Interactions in Crossing Scenarios: Virtual Drivers in AVs for Pedestrian Crossing Proceedings Article In: Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems, pp. 7, Association for Computing Machinery, New York, NY, USA, 2021, ISBN: 9781450380959. @inproceedings{Furuya2021,
title = {Autonomous Vehicle Visual Embodiment for Pedestrian Interactions in Crossing Scenarios: Virtual Drivers in AVs for Pedestrian Crossing},
author = {Hiroshi Furuya and Kangsoo Kim and Gerd Bruder and Pamela J. Wisniewski and Gregory F. Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/06/Furuya2021.pdf},
doi = {10.1145/3411763.3451626},
isbn = {9781450380959},
year = {2021},
date = {2021-05-08},
booktitle = {Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems},
number = {304},
pages = {7},
publisher = {Association for Computing Machinery},
address = {New York, NY, USA},
series = {CHI EA'21},
abstract = {This work presents a novel prototype autonomous vehicle (AV) human-machine interface (HMI) in virtual reality (VR) that utilizes a human-like visual embodiment in the driver's seat of an AV to communicate AV intent to pedestrians in a crosswalk scenario. There is currently a gap in understanding the use of virtual humans in AV HMIs for pedestrian crossing despite the demonstrated efficacy of human-like interfaces in improving human-machine relationships. We conduct a 3x2 within-subjects experiment in VR using our prototype to assess the effects of a virtual human visual embodiment AV HMI on pedestrian crossing behavior and experience. In the experiment participants walk across a virtual crosswalk in front of an AV. How long they took to decide to cross and how long it took for them to reach the other side were collected, in addition to their subjective preferences and feelings of safety. Of 26 participants, 25 preferred the condition with the most anthropomorphic features. An intermediate condition where a human-like virtual driver was present but did not exhibit any behaviors was least preferred and also had a significant effect on time to decide. This work contributes the first empirical work on using human-like visual embodiments for AV HMIs.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
This work presents a novel prototype autonomous vehicle (AV) human-machine interface (HMI) in virtual reality (VR) that utilizes a human-like visual embodiment in the driver's seat of an AV to communicate AV intent to pedestrians in a crosswalk scenario. There is currently a gap in understanding the use of virtual humans in AV HMIs for pedestrian crossing despite the demonstrated efficacy of human-like interfaces in improving human-machine relationships. We conduct a 3x2 within-subjects experiment in VR using our prototype to assess the effects of a virtual human visual embodiment AV HMI on pedestrian crossing behavior and experience. In the experiment participants walk across a virtual crosswalk in front of an AV. How long they took to decide to cross and how long it took for them to reach the other side were collected, in addition to their subjective preferences and feelings of safety. Of 26 participants, 25 preferred the condition with the most anthropomorphic features. An intermediate condition where a human-like virtual driver was present but did not exhibit any behaviors was least preferred and also had a significant effect on time to decide. This work contributes the first empirical work on using human-like visual embodiments for AV HMIs. |
 | Mindi Anderson; Frank Guido-Sanz; Desiree A. Díaz; Benjamin Lok; Jacob Stuart; Ilerioluwa Akinnola; Gregory Welch Augmented Reality in Nurse Practitioner Education: Using a Triage Scenario to Pilot Technology Usability and Effectiveness Journal Article In: Clinical Simulation in Nursing, vol. 54, pp. 105-112, 2021, ISSN: 1876-1399. @article{Anderson2021,
title = {Augmented Reality in Nurse Practitioner Education: Using a Triage Scenario to Pilot Technology Usability and Effectiveness},
author = {Mindi Anderson and Frank Guido-Sanz and Desiree A. Díaz and Benjamin Lok and Jacob Stuart and Ilerioluwa Akinnola and Gregory Welch},
url = {https://www.sciencedirect.com/science/article/pii/S1876139921000098
https://sreal.ucf.edu/wp-content/uploads/2022/08/MindiAnderson2021qd.pdf
},
issn = {1876-1399},
year = {2021},
date = {2021-05-01},
urldate = {2021-05-01},
journal = {Clinical Simulation in Nursing},
volume = {54},
pages = {105-112},
abstract = {Background
Before implementation, simulations and new technologies should be piloted for usability and effectiveness. Simulationists and augmented reality (AR) researchers developed an AR triage scenario for Nurse Practitioner (NP) students.
Methods
A mixed-method, exploratory pilot study was carried out with NP students and other volunteers. Participants completed several tools to appraise the usability of the AR modality and the effectiveness of the scenario for learning. Open-ended questions were asked, and qualitative themes were obtained via content analysis.
Results
Mixed results were obtained from the twelve participants (8 students, 4 other volunteers). There were some issues with usability, and technical challenges occurred. The debriefing was found to be effective, and favorable comments were made on the simulation's realism. A need for further preparation on the content and technology, along with more practice, was inferred. Those who reported previous AR experience found the experience more effective.
Conclusions
Further improvements are needed in the usability of the AR modality. Debriefing can be effective and the simulation realistic. Participants need further preparation in triaging and in using the technology, and more practice is needed. AR simulations hold promise for use in NP education.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
Background
Before implementation, simulations and new technologies should be piloted for usability and effectiveness. Simulationists and augmented reality (AR) researchers developed an AR triage scenario for Nurse Practitioner (NP) students.
Methods
A mixed-method, exploratory pilot study was carried out with NP students and other volunteers. Participants completed several tools to appraise the usability of the AR modality and the effectiveness of the scenario for learning. Open-ended questions were asked, and qualitative themes were obtained via content analysis.
Results
Mixed results were obtained from the twelve participants (8 students, 4 other volunteers). There were some issues with usability, and technical challenges occurred. The debriefing was found to be effective, and favorable comments were made on the simulation's realism. A need for further preparation on the content and technology, along with more practice, was inferred. Those who reported previous AR experience found the experience more effective.
Conclusions
Further improvements are needed in the usability of the AR modality. Debriefing can be effective and the simulation realistic. Participants need further preparation in triaging and in using the technology, and more practice is needed. AR simulations hold promise for use in NP education. |
 | Zubin Choudhary; Matt Gottsacker; Kangsoo Kim; Ryan Schubert; Jeanine Stefanucci; Gerd Bruder; Greg Welch Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality Proceedings Article In: IEEE Virtual Reality (VR), 2021, 2021. @inproceedings{Choudhary2021,
title = {Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality},
author = {Zubin Choudhary and Matt Gottsacker and Kangsoo Kim and Ryan Schubert and Jeanine Stefanucci and Gerd Bruder and Greg Welch},
url = {https://sreal.ucf.edu/wp-content/uploads/2021/04/C2593-Revisiting-Distance-Perception-with-Scaled-Embodied-Cues-in-Social-Virtual-Reality-7.pdf},
year = {2021},
date = {2021-04-01},
booktitle = {IEEE Virtual Reality (VR), 2021},
abstract = {Previous research on distance estimation in virtual reality (VR) has well established that, even for geometrically accurate virtual objects and environments, users tend to systematically misestimate distances. This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. One as-yet unexplored factor is the trend that avatars’ embodied cues in Social VR are often scaled, e.g., by making one’s head bigger or one’s voice louder, to make social cues more pronounced over longer distances.
In this paper, we investigate how the perception of avatar distance changes based on two means of scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human-subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between-subjects factor and three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within-subjects factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. We discuss the interactions between the factors and implications for Social VR.
},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Previous research on distance estimation in virtual reality (VR) has well established that, even for geometrically accurate virtual objects and environments, users tend to systematically misestimate distances. This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. One as-yet unexplored factor is the trend that avatars’ embodied cues in Social VR are often scaled, e.g., by making one’s head bigger or one’s voice louder, to make social cues more pronounced over longer distances.
In this paper, we investigate how the perception of avatar distance changes based on two means of scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human-subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between-subjects factor and three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within-subjects factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. We discuss the interactions between the factors and implications for Social VR.
|
2020
|
![[Demo] Towards Interactive Virtual Dogs as a Pervasive Social Companion in Augmented Reality](https://sreal.ucf.edu/wp-content/uploads/2020/12/demoFig-e1607360623259.png) | Nahal Norouzi; Kangsoo Kim; Gerd Bruder; Greg Welch [Demo] Towards Interactive Virtual Dogs as a Pervasive Social Companion in Augmented Reality Proceedings Article In: Proceedings of the combined International Conference on Artificial Reality & Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE), pp. 29-30, 2020, (Best Demo Audience Choice Award). @inproceedings{Norouzi2020d,
title = {[Demo] Towards Interactive Virtual Dogs as a Pervasive Social Companion in Augmented Reality},
author = {Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Greg Welch },
url = {https://sreal.ucf.edu/wp-content/uploads/2020/12/029-030.pdf},
doi = {10.2312/egve.20201283},
year = {2020},
date = {2020-12-04},
booktitle = {Proceedings of the combined International Conference on Artificial Reality & Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE)},
pages = {29-30},
abstract = {Pets and animal-assisted intervention sessions have been shown to be beneficial for humans' mental, social, and physical health. However, for specific populations, factors such as hygiene restrictions, allergies, and care and resource limitations reduce interaction opportunities. In parallel, understanding the capabilities of animals' technological representations, such as robotic and digital forms, has received considerable attention and has fueled the utilization of many of these representations. Additionally, recent advances in augmented reality technology have allowed virtual animals with flexible appearances and behaviors to be realized in the real world. In this demo, we present a companion virtual dog in augmented reality that aims to facilitate a range of interactions with populations such as children and older adults. We discuss the potential benefits and limitations of such a companion and propose future use cases and research directions.},
note = {Best Demo Audience Choice Award},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
Pets and animal-assisted intervention sessions have been shown to be beneficial for humans' mental, social, and physical health. However, for specific populations, factors such as hygiene restrictions, allergies, and care and resource limitations reduce interaction opportunities. In parallel, understanding the capabilities of animals' technological representations, such as robotic and digital forms, has received considerable attention and has fueled the utilization of many of these representations. Additionally, recent advances in augmented reality technology have allowed virtual animals with flexible appearances and behaviors to be realized in the real world. In this demo, we present a companion virtual dog in augmented reality that aims to facilitate a range of interactions with populations such as children and older adults. We discuss the potential benefits and limitations of such a companion and propose future use cases and research directions. |