@article{Bruder2024, title = {Visual Factors Influencing Trust and Reliance with Augmented Reality Systems}, author = {Gerd Bruder and Michael Browne and Zubin Choudhary and Austin Erickson and Hiroshi Furuya and Matt Gottsacker and Ryan Schubert and Gregory Welch}, year = {2024}, date = {2024-05-17}, urldate = {2024-05-17}, journal = {Journal of Vision Abstracts—Vision Sciences Society (VSS) Annual Meeting}, abstract = {Augmented Reality (AR) systems are increasingly used for simulations, training, and operations across a wide range of application fields. Unfortunately, the imagery that current AR systems create often does not match our visual perception of the real world, which can make users feel like the AR system is not believable. This lack of belief can lead to negative training or experiences, where users lose trust in the AR system and adjust their reliance on AR. The latter is characterized by users adopting different cognitive perception-action pathways by which they integrate AR visual information for spatial tasks. In this work, we present a series of six within-subjects experiments (each N=20) in which we investigated trust in AR with respect to two display factors (field of view and visual contrast), two tracking factors (accuracy and precision), and two network factors (latency and dropouts). Participants performed a 360-degree visual search-and-selection task in a hybrid setup involving an AR head-mounted display and a CAVE-like simulated real environment. Participants completed the experiments with four perception-action pathways that represent different levels of the users’ reliance on an AR system: AR-Only (only relying on AR), AR-First (prioritizing AR over real world), Real-First (prioritizing real world over AR), and Real-Only (only relying on real world). Our results show that participants’ perception-action pathways and objective task performance were significantly affected by all six tested AR factors. In contrast, we found that their subjective responses for trust and reliance were often more affected by slight AR system differences than would elicit objective performance differences, and participants tended to overestimate or underestimate the trustworthiness of the AR system. Participants showed significantly higher task performance gains if their sense of trust was well-calibrated to the trustworthiness of the AR system, highlighting the importance of effectively managing users’ trust in future AR systems. Acknowledgements: This material includes work supported in part by Vision Products LLC via US Air Force Research Laboratory (AFRL) Award Number FA864922P1038, and the Office of Naval Research under Award Numbers N00014-21-1-2578 and N00014-21-1-2882 (Dr. Peter Squire, Code 34).}, keywords = {A-ae, A-gb, A-gfw, A-hf, A-mg, A-rs, A-zc, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Browne2024ut, title = {Understanding the impact of trust on performance in a training system using augmented reality}, author = {Michael P. Browne and Gregory F. 
Welch and Gerd Bruder and Ryan Schubert}, year = {2024}, date = {2024-04-22}, urldate = {2024-04-22}, booktitle = {Proceedings of SPIE Conference 13051: Artificial Intelligence and Machine Learning for Multi-Domain Operations Applications VI}, abstract = {The information presented by AR systems may not be 100% accurate, and anomalies like tracker errors, lack of opacity compared to the background and reduced field of view (FOV) can make users feel like an AR training system is not believable. This lack of belief can lead to negative training, where trainees adjust how they train due to flaws in the training system and are therefore less prepared for actual battlefield situations. We have completed an experiment to investigate trust, reliance, and human task performance in an augmented reality three-dimensional experimental scenario. Specifically, we used a methodology in which simulated real (complex) entities are supplemented by abstracted (basic) cues presented as overlays in an AR head mounted display (HMD) in a visual search and awareness task. We simulated properties of different AR displays to determine which of the properties most affect training efficacy. Results from our experiment will feed directly into the design of training systems that use AR/MR displays and will help increase the efficacy of training.}, keywords = {A-gb, A-gfw, A-rs, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2024aa, title = {Audiovisual Detection of Expectation Violations in Disparate Home Automation Systems}, author = {Greg Welch and Gerd Bruder and Ryan Schubert and Austin Erickson}, url = {https://sreal.ucf.edu/wp-content/uploads/2024/04/US11954900.pdf https://ppubs.uspto.gov/dirsearch-public/print/downloadPdf/11954900 }, year = {2024}, date = {2024-04-09}, urldate = {2024-04-09}, number = {US 11,954,900}, abstract = {The invention pertains to methods for monitoring the operational status of a home automation system through extrinsic visual and audible means. Initial training periods involve capturing image and audio data representative of nominal operation, which is then processed to identify operational indicators. Unsupervised machine learning models are trained with these indicators to construct a model of normalcy and identify expectation violations in the system's operational pattern. After meeting specific stopping criteria, real-time monitoring is initiated. When an expectation violation is detected, contrastive collages or sequences are generated comprising nominal and anomalous data. These are then transmitted to an end user, effectively conveying the context of the detected anomalies. Further features include providing deep links to smartphone applications for home automation configuration and the use of auditory scene analysis techniques. The invention provides a multi-modal approach to home automation monitoring, leveraging machine learning for robust anomaly detection. 
}, keywords = {A-ae, A-gb, A-gfw, A-rs, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Battistel2024co, title = {Chill or Warmth: Exploring Temperature's Impact on Interpersonal Boundaries in VR}, author = {Laura Battistel and Matt Gottsacker and Greg Welch and Gerd Bruder and Massimiliano Zampini and Riccardo Parin}, url = {https://sreal.ucf.edu/wp-content/uploads/2024/02/Battistel2024co.pdf}, year = {2024}, date = {2024-03-16}, urldate = {2024-03-16}, booktitle = {Adjunct Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR): 2nd Annual Workshop on Multi-modal Affective and Social Behavior Analysis and Synthesis in Extended Reality (MASSXR)}, pages = {1-3}, abstract = {This position paper outlines a study on the influence of avatars displaying warmth or coldness cues on interpersonal space in virtual reality. Participants will engage in a comfort-distance task, approaching avatars exhibiting thermoregulatory behaviors. Anticipated findings include a reduction in interpersonal distance with warm cues and an increase with cold cues. The study will offer insights into the complex interplay between temperature, social perception, and interpersonal space.}, keywords = {A-gb, A-gfw, A-mg, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Mostajeran2024ac, title = {Analyzing Cognitive Demands and Detection Thresholds for Redirected Walking in Immersive Forest and Urban Environments}, author = {Fariba Mostajeran and Sebastian Schneider and Gerd Bruder and Simone Kühn and Frank Steinicke}, year = {2024}, date = {2024-03-16}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {1-11}, abstract = {Redirected walking is a locomotion technique that allows users to naturally walk through large immersive virtual environments (IVEs) by guiding them on paths that might vary from the paths they walk in the real world. While this technique enables the exploration of larger spaces in the IVE via natural walking, previous work has shown that it induces extra cognitive load for the users. On the other hand, previous research has shown that exposure to virtual nature environments can restore users’ diminished attentional capacities and lead to enhanced cognitive performances. Therefore, the aim of this paper is to investigate if the environment in which the user is redirected has the potential to reduce its cognitive demands. For this purpose, we conducted an experiment with 28 participants, who performed a spatial working memory task (i.e., 2-back test) while walking and being redirected with different gains in two different IVEs (i.e., (i) forest and (ii) urban). The results of frequentist and Bayesian analysis are consistent and provide evidence against an effect of the type of IVE on detection thresholds as well as cognitive and locomotion performances. Therefore, redirected walking is robust to the variation of the IVEs tested in this experiment. 
The results partly challenge previous research findings and, therefore, require future work in this direction.}, keywords = {A-gb, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Benjamin2024et, title = {Evaluating Transitive Perceptual Effects Between Virtual Entities in Outdoor Augmented Reality}, author = {Juanita Benjamin and Austin Erickson and Matt Gottsacker and Gerd Bruder and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2024/02/Benjamin2024.pdf}, year = {2024}, date = {2024-03-16}, urldate = {2024-03-16}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {1-11}, abstract = {Augmented reality (AR) head-mounted displays (HMDs) provide users with a view in which digital content is blended spatially with the outside world. However, one critical issue faced with such display technologies is misperception, i.e., perceptions of computer-generated content that differs from our human perception of other real-world objects or entities. Misperception can lead to mistrust in these systems and negative impacts in a variety of application fields. Although there is a considerable amount of research investigating either size, distance, or speed misperception in AR, far less is known about the relationships between these aspects. In this paper, we present an outdoor AR experiment (N=20) using a HoloLens 2 HMD. Participants estimated size, distance, and speed of Familiar and Unfamiliar outdoor animals at three distances (30, 60, 90 meters). To investigate whether providing information about one aspect may influence another, we divided our experiment into three phases. In Phase I, participants estimated the three aspects without any provided information. In Phase II, participants were given accurate size information, then asked to estimate distance and speed. In Phase III, participants were given accurate distance and size information, then asked to estimate speed. Our results show that estimates of speed in particular of the Unfamiliar animals benefited from provided size information, while speed estimates of all animals benefited from provided distance information. We found no support for the assumption that distance estimates benefited from provided size information.}, keywords = {A-ae, A-gb, A-gfw, A-jb, A-mg, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{nokey, title = {rlty2rlty: Transitioning Between Realities with Generative AI}, author = {Matt Gottsacker and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2024/02/vr24d-sub1075-cam-i5.pdf https://www.youtube.com/watch?v=u4CyvdE3Y3g}, year = {2024}, date = {2024-02-20}, booktitle = {Proceedings of IEEE Conference on Virtual Reality and 3D User Interfaces (VR)}, pages = {1--2}, abstract = {We present a system for visually transitioning a mixed reality (MR) user between two arbitrary realities (e.g., between two virtual worlds or between the real environment and a virtual world). The system uses artificial intelligence (AI) to generate a 360° video that transforms the user’s starting environment to another environment, passing through a liminal space that could help them relax between tasks or prepare them for the ending environment. 
The video can then be viewed on an MR headset.}, keywords = {A-gb, A-gfw, A-mg, F-ONR, P-EICAR, SREAL}, pubstate = {forthcoming}, tppubtype = {inproceedings} } @patent{Bruder2023aa, title = {Spatial positioning of targeted object magnification}, author = {Gerd Bruder and Greg Welch and Kangsoo Kim and Zubin Choudhary}, url = {https://image-ppubs.uspto.gov/dirsearch-public/print/downloadPdf/11798127 https://sreal.ucf.edu/wp-content/uploads/2023/10/11798127.pdf}, year = {2023}, date = {2023-10-24}, urldate = {2023-10-24}, number = {US 11,798,127}, abstract = {One or more cameras capture objects at a higher resolution than the human eye can perceive. Objects are segmented from the background of the image and scaled to human perceptible size. The scaled-up objects are superimposed over the unscaled background. This is presented to a user via a display whereby the process selectively amplifies the size of the objects' spatially registered retinal projection while maintaining a natural (unmodified) view in the remainder of the visual field.}, keywords = {A-gb, A-gfw, A-kk, A-zc, F-ONR, SREAL}, pubstate = {published}, tppubtype = {patent} } @inproceedings{benjamin2023arscreen, title = {Perception and Proxemics with Virtual Humans on Transparent Display Installations in Augmented Reality}, author = {Juanita Benjamin and Gerd Bruder and Carsten Neumann and Dirk Reiners and Carolina Cruz-Neira and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2023/08/Perception-and-Proxemics-ISMAR-23-2.pdf}, year = {2023}, date = {2023-10-21}, urldate = {2023-10-21}, booktitle = {Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR) 2023}, pages = {1--10}, abstract = {It is not uncommon for science fiction movies to portray futuristic user interfaces that can only be realized decades later with state-of-the-art technology. In this work, we present a prototypical augmented reality (AR) installation that was inspired by the movie The Time Machine (2002). It consists of a transparent screen that acts as a window through which users can see the stereoscopic projection of a three-dimensional virtual human (VH). However, there are some key differences between the vision of this technology and the way VHs on these displays are actually perceived. In particular, the additive light model of these displays causes darker VHs to appear more transparent, while light in the physical environment further increases transparency, which may affect the way VHs are perceived, to what degree they are trusted, and the distances one maintains from them in a spatial setting.
In this paper, we present a user study in which we investigate how transparency in the scope of transparent AR screens affects the perception of a VH's appearance, social presence with the VH, and the social space around users as defined by proxemics theory. Our results indicate that appearances are comparatively robust to transparency, while social presence improves in darker physical environments, and proxemic distances to the VH largely depend on one's distance from the screen but are not noticeably affected by transparency. Overall, our results suggest that such transparent AR screens can be an effective technology for facilitating social interactions between users and VHs in a shared physical space.}, keywords = {A-gb, A-gfw, A-jb, F-NSF, F-ONR, P-HSI, SREAL}, pubstate = {forthcoming}, tppubtype = {inproceedings} } @article{Choudhary2023Speech, title = {Visual Facial Enhancements Can Significantly Improve Speech Perception in the Presence of Noise}, author = {Zubin Choudhary and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2023/07/final_sub1046_ISMAR23-compressed.pdf}, year = {2023}, date = {2023-10-17}, urldate = {2023-10-17}, journal = {IEEE Transactions on Visualization and Computer Graphics, Special Issue on the IEEE International Symposium on Mixed and Augmented Reality (ISMAR) 2023.}, abstract = {Human speech perception is generally optimal in quiet environments, however it becomes more difficult and error prone in the presence of noise, such as other humans speaking nearby or ambient noise. In such situations, human speech perception is improved by speech reading, i.e., watching the movements of a speaker’s mouth and face, either consciously as done by people with hearing loss or subconsciously by other humans. While previous work focused largely on speech perception of two-dimensional videos of faces, there is a gap in the research field focusing on facial features as seen in head-mounted displays, including the impacts of display resolution, and the effectiveness of visually enhancing a virtual human face on speech perception in the presence of noise. In this paper, we present a comparative user study (N = 21) in which we investigated an audio-only condition compared to two levels of head-mounted display resolution (1832×1920 or 916×960 pixels per eye) and two levels of the native or visually enhanced appearance of a virtual human, the latter consisting of an up-scaled facial representation and simulated lipstick (lip coloring) added to increase contrast. To understand effects on speech perception in noise, we measured participants’ speech reception thresholds (SRTs) for each audio-visual stimulus condition. These thresholds indicate the decibel levels of the speech signal that are necessary for a listener to receive the speech correctly 50% of the time. First, we show that the display resolution significantly affected participants’ ability to perceive the speech signal in noise, which has practical implications for the field, especially in social virtual environments. Second, we show that our visual enhancement method was able to compensate for limited display resolution and was generally preferred by participants. Specifically, our participants indicated that they benefited from the head scaling more than the added facial contrast from the simulated lipstick. 
We discuss relationships, implications, and guidelines for applications that aim to leverage such enhancements.}, keywords = {A-gb, A-gfw, A-zc, P-HSI, SREAL}, pubstate = {forthcoming}, tppubtype = {article} } @inproceedings{schubert2023tf, title = {Testbed for Intuitive Magnification in Augmented Reality}, author = {Ryan Schubert and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2023/08/ismar23_schubert2023tf.pdf https://sreal.ucf.edu/wp-content/uploads/2023/08/ismar23_schubert2023tf.mp4}, year = {2023}, date = {2023-10-16}, urldate = {2023-10-16}, booktitle = {Proceedings IEEE International Symposium on Mixed and Augmented Reality Adjunct (ISMAR-Adjunct)}, pages = {1--2}, abstract = {Humans strive to magnify portions of our visually perceived surroundings for various reasons, e.g., because they are too far away or too small to see. Different technologies have been introduced for magnification, from monoculars to binoculars, and telescopes to microscopes. Modern high-resolution digital cameras are a promising technology: they are capable of optical or digital zoom and are very flexible, as their imagery can be presented to users in real-time with mobile or head-mounted displays and intuitive 3D user interfaces allowing control over the magnification. In this demo, we present a novel design space and testbed for intuitive augmented reality (AR) magnifications, where an AR optical see-through head-mounted display is used for the presentation of real-time magnified camera imagery. The testbed includes different unimanual and bimanual AR interaction techniques for defining the scale factor and portion of the user's visual field that should be magnified.}, keywords = {A-gb, A-gfw, A-rs, F-NSF, F-ONR, P-ARA, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Choudhary2023aids, title = {Visual Hearing Aids: Artificial Visual Speech Stimuli for Audiovisual Speech Perception in Noise}, author = {Zubin Choudhary and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2023/09/MAIN_VRST_23_SpeechPerception_Phone.pdf}, year = {2023}, date = {2023-10-09}, urldate = {2023-10-09}, booktitle = {Proceedings of the 29th ACM Symposium on Virtual Reality Software and Technology, 2023}, abstract = {Speech perception is optimal in quiet environments, but noise can impair comprehension and increase errors. In these situations, lip reading can help, but it is not always possible, such as during an audio call or when wearing a face mask. One approach to improve speech perception in these situations is to use an artificial visual lip reading aid. In this paper, we present a user study (N = 17) in which we compared three levels of audio stimuli visualizations and two levels of modulating the appearance of the visualization based on the speech signal, and we compared them against two control conditions: an audio-only condition, and a real human speaking. We measured participants' speech reception thresholds (SRTs) to understand the effects of these visualizations on speech perception in noise. These thresholds indicate the decibel levels of the speech signal that are necessary for a listener to receive the speech correctly 50% of the time. Additionally, we measured the usability of the approaches and the user experience. We found that the different artificial visualizations improved participants' speech reception compared to the audio-only baseline condition, but they were significantly poorer than the real human condition.
This suggests that different visualizations can improve speech perception when the speaker's face is not available. However, we also discuss limitations of current plug-and-play lip sync software and abstract representations of the speaker in the context of speech perception.}, keywords = {A-gb, A-gfw, A-zc, SREAL}, pubstate = {forthcoming}, tppubtype = {conference} } @inproceedings{schubert2023iu, title = {Intuitive User Interfaces for Real-Time Magnification in Augmented Reality}, author = {Ryan Schubert and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2023/08/vrst23_bruder2023iu.pdf}, year = {2023}, date = {2023-10-09}, urldate = {2023-10-09}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)}, pages = {1--10}, abstract = {Various reasons exist why humans desire to magnify portions of our visually perceived surroundings, e.g., because they are too far away or too small to see with the naked eye. Different technologies are used to facilitate magnification, from telescopes to microscopes using monocular or binocular designs. In particular, modern digital cameras capable of optical and/or digital zoom are very flexible as their high-resolution imagery can be presented to users in real-time with displays and interfaces allowing control over the magnification. In this paper, we present a novel design space of intuitive augmented reality (AR) magnifications where an AR head-mounted display is used for the presentation of real-time magnified camera imagery. We present a user study evaluating and comparing different visual presentation methods and AR interaction techniques. Our results show different advantages for unimanual, bimanual, and situated AR magnification window interfaces, near versus far vergence distances for the image presentation, and five different user interfaces for specifying the scaling factor of the imagery.}, keywords = {A-gb, A-gfw, A-rs, F-NSF, F-ONR, P-ARA, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2022ab, title = {Intelligent Digital Interruption Management}, author = {Gregory Welch and Matthew Gottsacker and Nahal Norouzi and Gerd Bruder}, url = {https://image-ppubs.uspto.gov/dirsearch-public/print/downloadPdf/11729448 https://sreal.ucf.edu/wp-content/uploads/2023/08/US11729448.pdf}, year = {2023}, date = {2023-08-15}, urldate = {2023-08-15}, number = {US 11,729,448}, abstract = {The present invention is a system to manage interrupt notifications on an operating system based on the characteristics of content in which an end user is currently immersed or engaged. For example, relatively high bitrate video throughput is indicative of corresponding high information depth and more action occurring in the scene. For periods of high information depth, interrupt notifications are deferred until the information depth falls into a relative trough. Additional embodiments of the invention process scene transitions, technical cues, dialog and lyrics to release queued interrupt notifications at optimal times.
A vamping process is also provided when interrupt notifications are released to keep the end user prescient to the background application in which they were engaged prior to the interrupt notification coming into focus.}, keywords = {A-gb, A-gfw, A-mg, F-NSF, F-ONR, SREAL}, pubstate = {published}, tppubtype = {patent} } @conference{Choudhary2023, title = {Exploring the Social Influence of Virtual Humans Unintentionally Conveying Conflicting Emotions}, author = {Zubin Choudhary and Nahal Norouzi and Austin Erickson and Ryan Schubert and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2023/01/PostReview_ConflictingEmotions_IEEEVR23-1.pdf}, year = {2023}, date = {2023-03-29}, urldate = {2023-03-29}, booktitle = {Proceedings of the 30th IEEE Conference on Virtual Reality and 3D User Interfaces, IEEE VR 2023}, abstract = {The expression of human emotion is integral to social interaction, and in virtual reality it is increasingly common to develop virtual avatars that attempt to convey emotions by mimicking these visual and aural cues, i.e., the facial and vocal expressions. However, errors in (or the absence of) facial tracking can result in the rendering of incorrect facial expressions on these virtual avatars. For example, a virtual avatar may speak with a happy or unhappy vocal inflection while their facial expression remains otherwise neutral. In circumstances where there is conflict between the avatar's facial and vocal expressions, it is possible that users will incorrectly interpret the avatar's emotion, which may have unintended consequences in terms of social influence or in terms of the outcome of the interaction. In this paper, we present a human-subjects study (N = 22) aimed at understanding the impact of conflicting facial and vocal emotional expressions. Specifically, we explored three levels of emotional valence (unhappy, neutral, and happy) expressed in both visual (facial) and aural (vocal) forms. We also investigate three levels of head scales (down-scaled, accurate, and up-scaled) to evaluate whether head scale affects user interpretation of the conveyed emotion. We find significant effects of different multimodal expressions on happiness and trust perception, while no significant effect was observed for head scales. Evidence from our results suggests that facial expressions have a stronger impact than vocal expressions. Additionally, as the difference between the two expressions increases, the less predictable the multimodal expression becomes. For example, for the happy-looking and happy-sounding multimodal expression, we expect and see high happiness ratings and high trust; however, if one of the two expressions changes, this mismatch makes the expression less predictable.
We discuss the relationships, implications, and guidelines for social applications that aim to leverage multimodal social cues.}, keywords = {A-ae, A-gb, A-gfw, A-nn, A-rs, A-zc, SREAL}, pubstate = {published}, tppubtype = {conference} } @patent{Welch2023av, title = {Adaptive visual overlay for anatomical simulation}, author = {Greg Welch and Joseph LaViola and Francisco Guido-Sanz and Gerd Bruder and Mindi Anderson and Ryan Schubert}, url = {https://image-ppubs.uspto.gov/dirsearch-public/print/downloadPdf/11557216 https://sreal.ucf.edu/wp-content/uploads/2023/11/11557216.pdf}, year = {2023}, date = {2023-01-17}, urldate = {2023-01-17}, number = {US 11,557,216 B2}, abstract = {An anatomical feature simulation unit is a physical device designed to help simulate an anatomical feature (e.g., a wound) on an object (e.g., a human being or human surrogate such as a medical manikin) for instructing a trainee to learn or practice treatment skills. For the trainee, the simulation looks like a real body part when viewed using an Augmented Reality (AR) system. Responsive to a change in the anatomic state of the object (e.g., bending a knee or raising of an arm) not only the spatial location and orientation of the anatomical feature stays locked on the object in the AR system, but the characteristics of the anatomical feature change based on the physiologic logic of changing said anatomical state (e.g., greater or less blood flow, opening or closing of a wound).}, keywords = {A-gb, A-gfw, A-rs, F-NSF, F-ONR, SREAL}, pubstate = {published}, tppubtype = {patent} } @patent{nokey, title = {Grammar Dependent Tactile Pattern Invocation}, author = {Greg Welch and Gerd Bruder and Ryan McMahan}, url = {https://ppubs.uspto.gov/pubwebapp/external.html?q=11550470 https://sreal.ucf.edu/wp-content/uploads/2023/01/US11550470.pdf}, year = {2023}, date = {2023-01-10}, urldate = {2023-01-10}, number = {US 11,550,470}, abstract = {A system for translating text streams of alphanumeric characters into preconfigured, haptic output. Text strings are parsed against a grammar index to locate assigned haptic or vibratory output. This may include speech-to-text, chat messaging, or text arrays of any kind. When a match is located, a standardized haptic output pattern is invoked through a haptic device. A device affordance module adapts the haptic output pattern to the capabilities of the target haptic device. }, keywords = {A-gb, A-gfw, F-NSF, F-ONR, P-BED, SREAL}, pubstate = {published}, tppubtype = {patent} } @inbook{Kim2023aa, title = {The Augmented Reality Internet of Things: Opportunities of Embodied Interactions in Transreality}, author = {Kangsoo Kim and Nahal Norouzi and Dongsik Jo and Gerd Bruder and Greg Welch}, editor = {Andrew Yeh Ching Nee and Soh Khim Ong}, url = {https://doi.org/10.1007/978-3-030-67822-7_32}, doi = {10.1007/978-3-030-67822-7_32}, isbn = {978-3-030-67822-7}, year = {2023}, date = {2023-01-01}, urldate = {2023-01-01}, booktitle = {Springer Handbook of Augmented Reality}, pages = {797--829}, publisher = {Springer International Publishing}, address = {Cham}, abstract = {Human society is encountering a new wave of advancements related to smart connected technologies with the convergence of different traditionally separate fields, which can be characterized by a fusion of technologies that merge and tightly integrate the physical, digital, and biological spheres. 
In this new paradigm of convergence, all the physical and digital things will become more and more intelligent and connected to each other through the Internet, and the boundary between them will blur and become seamless. In particular, augmented/mixed reality (AR/MR), which combines virtual content with the real environment, is experiencing an unprecedented golden era along with dramatic technological achievements and increasing public interest. Together with advanced artificial intelligence (AI) and ubiquitous computing empowered by the Internet of Things/Everything (IoT/IoE) systems, AR can be our ultimate interface to interact with both digital (virtual) and physical (real) worlds while pervasively mediating and enriching our lives.}, keywords = {A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, SREAL}, pubstate = {published}, tppubtype = {inbook} } @patent{Welch2022ac, title = {Medical Monitoring Virtual Human with Situational Awareness}, author = {Greg Welch and Gerd Bruder}, url = {https://image-ppubs.uspto.gov/dirsearch-public/print/downloadPdf/11535261}, year = {2022}, date = {2022-12-17}, urldate = {2022-12-17}, number = {US 11,535,261}, abstract = {Virtual humans exhibit behaviors associated with inputs and outputs of an autonomous control system for medical monitoring of patients. To foster awareness and trust, the virtual humans exhibit situational awareness via apparent (e.g., rendered) behaviors based on inputs such as physiological vital signs. The virtual humans also exhibit situational control via apparent behaviors associated with outputs such as direct control of devices, functions of control, actions based on high-level goals, and the optional use of virtual versions of conventional physical controls. A dynamic virtual human who continually exhibits awareness of the system state and relevant contextual circumstances, along with the ability to directly control the system, is used to reduce negative feelings associated with the system such as uncertainty, concern, stress, or anxiety on the part of real human patients.}, keywords = {A-gb, A-gfw, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {patent} } @article{Choudhary2022, title = {Virtual Big Heads in Extended Reality: Estimation of Ideal Head Scales and Perceptual Thresholds for Comfort and Facial Cues}, author = {Zubin Choudhary and Austin Erickson and Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Greg Welch}, url = {https://drive.google.com/file/d/1jdxwLchDH0RPouVENoSx8iSOyDmJhqKb/view?usp=sharing}, year = {2022}, date = {2022-11-02}, urldate = {2022-11-02}, journal = {ACM Transactions on Applied Perception}, abstract = {Extended reality (XR) technologies, such as virtual reality (VR) and augmented reality (AR), provide users, their avatars, and embodied agents a shared platform to collaborate in a spatial context. Traditional face-to-face communication is limited by users' proximity: another human's non-verbal embodied cues become more difficult to perceive the farther one is away from that person. In this paper, we describe and evaluate the ``Big Head'' technique, in which a human's head in VR/AR is scaled up relative to their distance from the observer as a mechanism for enhancing the visibility of non-verbal facial cues, such as facial expressions or eye gaze. To better understand and explore this technique, we present two complementary human-subject experiments in this paper.
In our first experiment, we conducted a VR study with a head-mounted display (HMD) to understand the impact of increased or decreased head scales on participants' ability to perceive facial expressions as well as their sense of comfort and feeling of ``uncanniness'' over distances of up to 10 meters. We explored two different scaling methods and compared perceptual thresholds and user preferences. Our second experiment was performed in an outdoor AR environment with an optical see-through (OST) HMD. Participants were asked to estimate facial expressions and eye gaze, and identify a virtual human over large distances of 30, 60, and 90 meters. In both experiments, our results show significant differences in minimum, maximum, and ideal head scales for different distances and tasks related to perceiving faces, facial expressions, and eye gaze, while we also found that participants were more comfortable with slightly bigger heads at larger distances. We discuss our findings with respect to the technologies used, and we discuss implications and guidelines for practical applications that aim to leverage XR-enhanced facial cues.}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-nn, A-zc, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{gottsacker2022noise, title = {Effects of Environmental Noise Levels on Patient Handoff Communication in a Mixed Reality Simulation}, author = {Matt Gottsacker and Nahal Norouzi and Ryan Schubert and Frank Guido-Sanz and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/10/main.pdf}, doi = {10.1145/3562939.3565627}, isbn = {978-1-4503-9889-3/22/11}, year = {2022}, date = {2022-10-27}, urldate = {2022-10-27}, booktitle = {28th ACM Symposium on Virtual Reality Software and Technology (VRST '22)}, pages = {1-10}, abstract = {When medical caregivers transfer patients to another person's care (a patient handoff), it is essential they effectively communicate the patient's condition to ensure the best possible health outcomes. Emergency situations caused by mass casualty events (e.g., natural disasters) introduce additional difficulties to handoff procedures such as environmental noise. We created a projected mixed reality simulation of a handoff scenario involving a medical evacuation by air and tested how low, medium, and high levels of helicopter noise affected participants' handoff experience, handoff performance, and behaviors. Through a human-subjects experimental design study (N = 21), we found that the addition of noise increased participants' subjective stress and task load, decreased their self-assessed and actual performance, and caused participants to speak louder. Participants also stood closer to the virtual human sending the handoff information when listening to the handoff than they stood to the receiver when relaying the handoff information. We discuss implications for the design of handoff training simulations and avenues for future handoff communication research.}, keywords = {A-gb, A-gfw, A-mg, A-nn, A-rs, F-FHTCC, F-NSF, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{gottsacker2022desktopar, title = {Towards a Desktop–AR Prototyping Framework: Prototyping Cross-Reality Between Desktops and Augmented Reality}, author = {Robbe Cools and Matt Gottsacker and Adalberto Simeone and Gerd Bruder and Gregory F.
Welch and Steven Feiner}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/10/ISMAR2022_Workshop_on_Prototyping_Cross_Reality_Systems.pdf}, doi = {10.1109/ISMAR-Adjunct57072.2022.00040}, year = {2022}, date = {2022-10-22}, urldate = {2022-10-22}, booktitle = {Adjunct Proceedings of IEEE International Symposium on Mixed and Augmented Reality (ISMAR)}, pages = {175-182}, abstract = {Augmented reality (AR) head-worn displays (HWDs) allow users to view and interact with virtual objects anchored in the 3D space around them. These devices extend users’ digital interaction space compared to traditional desktop computing environments by both allowing users to interact with a larger virtual display and by affording new interactions (e.g., intuitive 3D manipulations) with virtual content. Yet, 2D desktop displays still have advantages over AR HWDs for common computing tasks and will continue to be used well into the future. Because of their not entirely overlapping set of affordances, AR HWDs and 2D desktops may be useful in a hybrid configuration; that is, users may benefit from being able to work on computing tasks in either environment (or simultaneously in both environments) while transitioning virtual content between them. In support of such computing environments, we propose a prototyping framework for bidirectional Cross-Reality interactions between a desktop and an AR HWD. We further implemented a proof-of-concept seamless Desktop–AR display space, and describe two concrete use cases for our framework. In future work we aim to further develop our proof-of-concept into the proposed framework.}, keywords = {A-gb, A-gfw, A-mg, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Erickson2022c, title = {[Poster] Adapting Michelson Contrast for use with Optical See-Through Displays}, author = {Austin Erickson and Gerd Bruder and Gregory F Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/10/ISMARContrastModel_POSTER.pdf}, year = {2022}, date = {2022-10-17}, urldate = {2022-10-17}, booktitle = {In Adjunct Proceedings of the IEEE International Symposium on Mixed and Augmented Reality}, pages = {1--2}, publisher = {IEEE}, organization = {IEEE}, abstract = {Due to the additive light model employed by current optical see-through head-mounted displays (OST-HMDs), the perceived contrast of displayed imagery is reduced with increased environment luminance, often to the point where it becomes difficult for the user to accurately distinguish the presence of visual imagery. While existing contrast models, such as Weber contrast and Michelson contrast, can be used to predict when the observer will experience difficulty distinguishing and interpreting stimuli on traditional displays, these models must be adapted for use with additive displays. In this paper, we present a simplified model of luminance contrast for optical see-through displays derived from Michelson’s contrast equation and demonstrate two applications of the model: informing design decisions involving the color of virtual imagery and optimizing environment light attenuation through the use of neutral density filters.}, keywords = {A-ae, A-gb, A-gfw, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{nokey, title = {[POSTER] Exploring Cues and Signaling to Improve Cross-Reality Interruptions}, author = {Matt Gottsacker and Raiffa Syamil and Pamela Wisniewski and Gerd Bruder and Carolina Cruz-Neira and Gregory F. 
Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/09/ISMAR22_CrossReality_camready_3.pdf}, doi = {10.1109/ISMAR-Adjunct57072.2022.00179}, year = {2022}, date = {2022-10-15}, urldate = {2022-10-15}, booktitle = {Adjunct Proceedings of IEEE International Symposium on Mixed and Augmented Reality (ISMAR)}, pages = {827-832}, abstract = {In this paper, we report on initial work exploring the potential value of technology-mediated cues and signals to improve cross-reality interruptions. We investigated the use of color-coded visual cues (LED lights) to help a person decide when to interrupt a virtual reality (VR) user, and a gesture-based mechanism (waving at the user) to signal their desire to do so. To assess the potential value of these mechanisms we conducted a preliminary 2×3 within-subjects experimental design user study (N = 10) where the participants acted in the role of the interrupter. While we found that our visual cues improved participants’ experiences, our gesture-based signaling mechanism did not, as users did not trust it nor consider it as intuitive as a speech-based mechanism might be. Our preliminary findings motivate further investigation of interruption cues and signaling mechanisms to inform future VR head-worn display system designs.}, keywords = {A-gb, A-gfw, A-mg, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2022aa, title = {Augmentation of Relative Pose In Co-located Devices}, author = {Gregory Welch and Gerd Bruder}, url = {https://ppubs.uspto.gov/pubwebapp/external.html?q=11467399 https://sreal.ucf.edu/wp-content/uploads/2022/10/US11467399.pdf }, year = {2022}, date = {2022-10-11}, urldate = {2022-10-11}, number = {US 11,467,399}, abstract = {This invention relates to tracking of hand-held devices and vehicles with respect to each other, in circumstances where there are two or more users or existent objects interacting in the same share space (co-location). It extends conventional global and body-relative approaches to “cooperatively” estimate the relative poses between all useful combinations of user-worn tracked devices such as HMDs and hand-held controllers worn (or held) by multiple users. Additionally, the invention provides for tracking of vehicles such as cars and unmanned aerial vehicles.}, keywords = {A-gb, A-gfw, F-NSF, F-ONR, P-ARA, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Ramos2022, title = {Effects of Optical See-Through Displays on Self-Avatar Appearance in Augmented Reality}, author = {Meelad Doroodchi and Priscilla Ramos and Austin Erickson and Hiroshi Furuya and Juanita Benjamin and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/10/IDEATExR2022_REU_Paper.pdf}, year = {2022}, date = {2022-08-17}, urldate = {2022-08-31}, booktitle = {Proceedings of the International Symposium on Mixed and Augmented Reality (ISMAR)}, organization = {IEEE}, abstract = {Display technologies in the fields of virtual and augmented reality can affect the appearance of human representations, such as avatars used in telepresence or entertainment applications. In this paper, we describe a user study (N=20) where participants saw themselves in a mirror side-by-side with their own avatar, through use of a HoloLens 2 optical see-through head-mounted display. Participants were tasked to match their avatar’s appearance to their own under two environment lighting conditions (200 lux and 2,000 lux). 
Our results showed that the intensity of environment lighting had a significant effect on participants' selected skin colors for their avatars, where participants with dark skin colors tended to make their avatar's skin color lighter, nearly to the level of participants with light skin color. Further, female participants in particular made their avatar's hair color darker for the lighter environment lighting condition. We discuss our results in view of technological limitations and effects on the diversity of avatar representations on optical see-through displays.}, keywords = {A-ae, A-gb, A-gfw, A-hf, A-jb, F-NSF, F-ONR, P-ARA, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Bruder2022ph, title = {Intelligent Object Magnification for Augmented Reality Displays}, author = {Gerd Bruder and Gregory Welch and Kangsoo Kim and Zubin Choudhary}, url = {https://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=11410270.PN.&OS=PN/11410270&RS=PN/11410270 https://sreal.ucf.edu/wp-content/uploads/2022/08/Bruder2022ph.pdf}, year = {2022}, date = {2022-08-09}, urldate = {2022-08-09}, number = {US 11,410,270}, keywords = {A-gb, A-gfw, A-kk, A-zc, F-ONR, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {patent} } @article{Erickson2022b, title = {Analysis of the Saliency of Color-Based Dichoptic Cues in Optical See-Through Augmented Reality}, author = {Austin Erickson and Gerd Bruder and Gregory F. Welch}, editor = {Klaus Mueller}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/07/ARPreattentiveCues-1.pdf}, doi = {10.1109/TVCG.2022.3195111}, year = {2022}, date = {2022-07-26}, urldate = {2022-07-26}, journal = {IEEE Transactions on Visualization and Computer Graphics}, pages = {1-15}, abstract = {In a future of pervasive augmented reality (AR), AR systems will need to be able to efficiently draw or guide the attention of the user to visual points of interest in their physical-virtual environment. Since AR imagery is overlaid on top of the user's view of their physical environment, these attention guidance techniques must not only compete with other virtual imagery, but also with distracting or attention-grabbing features in the user's physical environment. Because of the wide range of physical-virtual environments that pervasive AR users will find themselves in, it is difficult to design visual cues that "pop out" to the user without performing a visual analysis of the user's environment, and changing the appearance of the cue to stand out from its surroundings. In this paper, we present an initial investigation into the potential uses of dichoptic visual cues for optical see-through AR displays, specifically cues that involve having a difference in hue, saturation, or value between the user's eyes. These types of cues have been shown to be preattentively processed by the user when presented on other stereoscopic displays, and may also be an effective method of drawing user attention on optical see-through AR displays. We present two user studies: one that evaluates the saliency of dichoptic visual cues on optical see-through displays, and one that evaluates their subjective qualities.
Our results suggest that hue-based dichoptic cues or “Forbidden Colors” may be particularly effective for these purposes, achieving significantly lower error rates in a pop out task compared to value-based and saturation-based cues.}, keywords = {A-ae, A-gb, A-gfw, F-NSF, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Chen2022ky, title = {Immersive Media Technologies: The Acceleration of Augmented and Virtual Reality in the Wake of COVID-19}, author = {Pearly Chen and Mark Griswold and Hao Li and Sandra Lopez and Nahal Norouzi and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/03/WEF_Immersive_Media_Technologies_2022.pdf https://www.weforum.org/reports/immersive-media-technologies-the-acceleration-of-augmented-and-virtual-reality-in-the-wake-of-covid-19}, year = {2022}, date = {2022-06-20}, urldate = {2022-06-20}, journal = {World Economic Forum}, abstract = {The COVID-19 pandemic disrupted whole economies. Immersive media businesses, which focus on technologies that create or imitate the physical world through digital simulation, have been no exception. The Global Future Council on Augmented Reality and Virtual Reality, which is comprised of interdisciplinary thought leaders in immersive technology and media, has examined the transformative impact of the pandemic and the speed of adoption of these technologies across industries.}, howpublished = {url{https://www.weforum.org/reports/immersive-media-technologies-the-acceleration-of-augmented-and-virtual-reality-in-the-wake-of-covid-19}}, keywords = {A-gfw, A-nn, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Erickson2022, title = {Perceived Humanness Bias in Additive Light Model Displays (Poster)}, author = {Austin Erickson and Gerd Bruder and Gregory Welch and Isaac Bynum and Tabitha Peck and Jessica Good}, url = {https://www.visionsciences.org/presentation/?id=4201 }, year = {2022}, date = {2022-05-17}, urldate = {2022-05-17}, journal = {Journal of Vision}, issue = {Journal of Vision}, publisher = {Association for Research in Vision and Ophthalmology (ARVO)}, abstract = {Additive light model displays, such as optical see-through augmented reality displays, create imagery by adding light over a physical scene. While these types of displays are commonly used, they are limited in their ability to display dark, low-luminance colors. As a result of this, these displays cannot render the color black and other similar colors, and instead the resulting color is rendered as completely transparent. This optical limitation introduces perceptual problems, as virtual imagery with dark colors appears semi-transparent, while lighter colored imagery is more opaque. We generated an image set of virtual humans that captures the peculiarities of imagery shown on an additive display by performing a perceptual matching task between imagery shown on a Microsoft HoloLens and imagery shown on a flat panel display. We then used this image set to run an online user study to explore whether this optical limitation introduces bias in user perception of virtual humans of different skin colors. We evaluated virtual avatars and virtual humans at different opacity levels ranging from how they currently appear on the Microsoft HoloLens, to how they would appear on a display without transparency and color blending issues. Our results indicate that, regardless of skin tone, the perceived humanness of the virtual humans and avatars decreases with respect to opacity level. 
As a result of this, virtual humans with darker skin tones are perceived as less human compared to those with lighter skin tones. This result suggests that there may be an unintentional racial bias when using applications involving telepresence or virtual humans on additive light model displays. While optical and hardware solutions to this problem are likely years away, we emphasize that future work should investigate how some of these perceptual issues may be overcome via software-based methods.}, keywords = {A-ae, A-gb, A-gfw, F-NSF, F-ONR, P-EICAR, SREAL}, pubstate = {forthcoming}, tppubtype = {article} } @article{Guido-Sanz2022ch, title = {Using Simulation to Test Validity and Reliability of I-BIDS: A New Handoff Tool}, author = {Frank Guido-Sanz and Mindi Anderson and Steven Talbert and Desiree A. Diaz and Gregory Welch and Alyssa Tanaka}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/07/Guido-Sanz2022ch.pdf}, year = {2022}, date = {2022-05-16}, urldate = {2022-05-16}, journal = {Simulation & Gaming}, volume = {53}, number = {4}, pages = {353-368}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Norouzi2022b, title = {The Advantages of Virtual Dogs Over Virtual People: Using Augmented Reality to Provide Social Support in Stressful Situations}, author = {Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Jeremy Bailenson and Pamela J. Wisniewski and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/05/1-s2.0-S1071581922000659-main.pdf}, year = {2022}, date = {2022-05-01}, urldate = {2022-05-01}, journal = {International Journal of Human Computer Studies}, abstract = {Past research highlights the potential for leveraging both humans and animals as social support figures in one’s real life to enhance performance and reduce physiological and psychological stress. Some studies have shown that typically dogs are more effective than people. Various situational and interpersonal circumstances limit the opportunities for receiving support from actual animals in the real world introducing the need for alternative approaches. To that end, advances in augmented reality (AR) technology introduce new opportunities for realizing and investigating virtual dogs as social support figures. In this paper, we report on a within-subjects 3x1 (i.e., no support, virtual human, or virtual dog) experimental design study with 33 participants. We examined the effect on performance, attitude towards the task and the support figure, and stress and anxiety measured through both subjective questionnaires and heart rate data. Our mixed-methods analysis revealed that participants significantly preferred, and more positively evaluated, the virtual dog support figure than the other conditions. Emerged themes from a qualitative analysis of our participants’ post-study interview responses are aligned with these findings as some of our participants mentioned feeling more comfortable with the virtual dog compared to the virtual human although the virtual human was deemed more interactive. We did not find significant differences between our conditions in terms of change in average heart rate; however, average heart rate significantly increased during all conditions. Our research contributes to understanding how AR virtual support dogs can potentially be used to provide social support to people in stressful situations, especially when real support figures cannot be present. 
We discuss the implications of our findings and share insights for future research.}, keywords = {A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Li2022, title = {A Scoping Review of Assistance and Therapy with Head-Mounted Displays for People Who Are Visually Impaired}, author = {Yifan Li and Kangsoo Kim and Austin Erickson and Nahal Norouzi and Jonathan Jules and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/04/scoping.pdf}, doi = {10.1145/3522693}, issn = {1936-7228}, year = {2022}, date = {2022-04-21}, urldate = {2022-04-21}, journal = {ACM Transactions on Accessible Computing}, volume = {00}, number = {00}, issue = {00}, pages = {25}, abstract = {Given the inherent visual affordances of Head-Mounted Displays (HMDs) used for Virtual and Augmented Reality (VR/AR), they have been actively used over many years as assistive and therapeutic devices for the people who are visually impaired. In this paper, we report on a scoping review of literature describing the use of HMDs in these areas. Our high-level objectives included detailed reviews and quantitative analyses of the literature, and the development of insights related to emerging trends and future research directions. Our review began with a pool of 1251 papers collected through a variety of mechanisms. Through a structured screening process, we identified 61 English research papers employing HMDs to enhance the visual sense of people with visual impairments for more detailed analyses. Our analyses reveal that there is an increasing amount of HMD-based research on visual assistance and therapy, and there are trends in the approaches associated with the research objectives. For example, AR is most often used for visual assistive purposes, whereas VR is used for therapeutic purposes. We report on eight existing survey papers, and present detailed analyses of the 61 research papers, looking at the mitigation objectives of the researchers (assistive versus therapeutic), the approaches used, the types of HMDs, the targeted visual conditions, and the inclusion of user studies. In addition to our detailed reviews and analyses of the various characteristics, we present observations related to apparent emerging trends and future research directions.}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-nn, F-NSF, P-EICAR, SREAL}, pubstate = {forthcoming}, tppubtype = {article} } @inproceedings{gottsacker2022balancing, title = {[DC] Balancing Realities by Improving Cross-Reality Interactions}, author = {Matt Gottsacker}, year = {2022}, date = {2022-04-20}, urldate = {2022-04-20}, booktitle = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)}, pages = {944-945}, address = {Christchurch, New Zealand}, abstract = {Virtual reality (VR) devices have a demonstrated capability to make users feel present in a virtual world. Research has shown that, at times, users desire a less immersive system that provides them awareness of and the ability to interact with elements from the real world and with a variety of devices. Understanding such cross-reality interactions is an under-explored research area that will become increasingly important as immersive devices become more ubiquitous. In this extended abstract, I provide an overview of my previous PhD research on facilitating cross-reality interactions between VR users and nearby non-VR interrupters.
I discuss planned future research to investigate the social norms that are complicated by these interactions and design solutions that lead to meaningful interactions. These topics and questions will be discussed at the IEEE VR 2022 Doctoral Consortium.}, keywords = {A-mg, F-NSF, F-ONR, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{US11287971, title = {Visual-Tactile Virtual Telepresence}, author = {Gregory Welch and Gerd Bruder and Ryan McMahan}, url = {https://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=11287971.PN.&OS=PN/11287971&RS=PN/112879711 https://sreal.ucf.edu/wp-content/uploads/2022/08/US11287971.pdf}, year = {2022}, date = {2022-03-29}, urldate = {2022-03-29}, number = {US 11,287,971}, keywords = {A-gfw, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Norouzi2022, title = {Virtual Humans with Pets and Robots: Exploring the Influence of Social Priming on One’s Perception of a Virtual Human}, author = {Nahal Norouzi and Matthew Gottsacker and Gerd Bruder and Pamela Wisniewski and Jeremy Bailenson and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/01/2022007720.pdf}, year = {2022}, date = {2022-03-16}, urldate = {2022-03-16}, booktitle = {Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR), Christchurch, New Zealand, 2022.}, pages = {10}, publisher = {IEEE}, abstract = {Social priming is the idea that observations of a virtual human (VH) engaged in short social interactions with a real or virtual human bystander can positively influence users’ subsequent interactions with that VH. In this paper we investigate the question of whether the positive effects of social priming are limited to interactions with humanoid entities. For instance, virtual dogs offer an attractive candidate for non-humanoid entities, as previous research suggests multiple positive effects. In particular, real human dog owners receive more positive attention from strangers than non-dog owners. To examine the influence of such social priming we carried out a human-subjects experiment with four conditions: three social priming conditions where a participant initially observed a VH interacting with one of three virtual entities (another VH, a virtual pet dog, or a virtual personal robot), and a non-social priming condition where a VH (alone) was intently looking at her phone as if reading something. We recruited 24 participants and conducted a mixed-methods analysis. We found that a VH’s prior social interactions with another VH and a virtual dog significantly increased participants’ perceptions of the VHs’ affective attraction. Also, participants felt more inclined to interact with the VH in the future in all of the social priming conditions. Qualitatively, we found that the social priming conditions resulted in a more positive user experience than the non-social priming condition. 
Also, the virtual dog and the virtual robot were perceived as a source of positive surprise, with participants appreciating the non-humanoid interactions for various reasons, such as the avoidance of social anxieties sometimes associated with humans.}, keywords = {A-gb, A-gfw, A-mg, A-nn, F-NSF, F-ONR, P-ARA, P-EICAR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Ugarte2022, title = {Distant Hand Interaction Framework in Augmented Reality}, author = {Jesus Ugarte and Nahal Norouzi and Austin Erickson and Gerd Bruder and Greg Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2022/05/Distant_Hand_Interaction_Framework_in_Augmented_Reality.pdf}, doi = {10.1109/VRW55335.2022.00332}, year = {2022}, date = {2022-03-16}, urldate = {2022-03-16}, booktitle = {Proceedings of the 2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)}, pages = {962-963}, publisher = {IEEE}, address = {Christchurch, New Zealand}, organization = {IEEE}, series = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)}, abstract = {Recent augmented reality (AR) head-mounted displays support shared experiences among multiple users in real physical spaces. While previous research looked at different embodied methods to enhance interpersonal communication cues, so far, less research looked at distant interaction in AR and, in particular, distant hand communication, which can open up new possibilities for scenarios, such as large-group collaboration. In this demonstration, we present a research framework for distant hand interaction in AR, including mapping techniques and visualizations. Our techniques are inspired by virtual reality (VR) distant hand interactions, but had to be adjusted due to the different context in AR and limited knowledge about the physical environment. We discuss different techniques for hand communication, including deictic pointing at a distance, distant drawing in AR, and distant communication through symbolic hand gestures.}, keywords = {A-ae, A-gb, A-gfw, a-ju, A-nn, F-NSF, F-ONR, P-BED, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{nokey, title = {Effects of Transparency on Perceived Humanness: Implications for Rendering Skin Tones Using Optical See-Through Displays}, author = {Tabitha C. Peck and Jessica J. Good and Austin Erickson and Isaac Bynum and Gerd Bruder}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/02/AR_and_Avatar_Transparency.pdf https://www.youtube.com/watch?v=0tUlhbxhE6U&t=59s}, doi = {10.1109/TVCG.2022.3150521}, issn = {1941-0506}, year = {2022}, date = {2022-03-15}, urldate = {2022-03-15}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, number = {01}, pages = {1-11}, abstract = {Current optical see-through displays in the field of augmented reality are limited in their ability to display colors with low lightness in the hue, saturation, lightness (HSL) color space, causing such colors to appear transparent. This hardware limitation may add unintended bias into scenarios with virtual humans. Humans have varying skin tones including HSL colors with low lightness. When virtual humans are displayed with optical see-through devices, people with low lightness skin tones may be displayed semi-transparently while those with high lightness skin tones will be displayed more opaquely. For example, a Black avatar may appear semi-transparent in the same scene as a White avatar who will appear more opaque. 
We present an exploratory user study (N = 160) investigating whether differing opacity levels result in dehumanizing avatar and human faces. Results support that dehumanization occurs as opacity decreases. This suggests that in similar lighting, low lightness skin tones (e.g., Black faces) will be viewed as less human than high lightness skin tones (e.g., White faces). Additionally, the perceived emotionality of virtual human faces also predicts perceived humanness. Angry faces were seen overall as less human, and at lower opacity levels happy faces were seen as more human. Our results suggest that additional research is needed to understand the effects and interactions of emotionality and opacity on dehumanization. Further, we provide evidence that unintentional racial bias may be added when developing for optical see-through devices using virtual humans. We highlight the potential bias and discuss implications and directions for future research.}, keywords = {A-ae, A-gb, F-NSF, F-ONR, P-ARA, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Bynum2022, title = {The Effects of Transparency on Dehumanization of Black Avatars in Augmented Reality}, author = {Isaac Bynum and Jessica J. Good and Gerd Bruder and Austin Erickson and Tabitha C. Peck}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/07/spspPoster.pdf}, year = {2022}, date = {2022-02-16}, urldate = {2022-02-16}, booktitle = {Proceedings of the Annual Conference of the Society for Personality and Social Psychology}, address = {San Francisco, CA}, organization = {Society for Personality and Social Psychology}, series = {Annual Conference of the Society for Personality and Social Psychology}, howpublished = {Poster at Annual Conference of the Society for Personality and Social Psychology 2022}, keywords = {A-ae, A-gb, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2023aa, title = {Grammar Dependent Tactile Pattern Invocation}, author = {Gregory Welch and Gerd Bruder and Ryan McMahan}, url = {https://patents.google.com/patent/US11550470B2/en?oq=11550470}, year = {2022}, date = {2022-01-28}, keywords = {A-gb, A-gfw, F-NSF, P-BED}, pubstate = {published}, tppubtype = {patent} } @phdthesis{Norouzi2021c, title = {The Social and Behavioral Influences of Interactions with Virtual Dogs as Embodied Agents in Augmented and Virtual Reality}, author = {Nahal Norouzi}, url = {https://sreal.ucf.edu/wp-content/uploads/2022/04/The-Social-and-Behavioral-Influences-of-Interactions-with-Virtual.pdf}, year = {2021}, date = {2021-12-18}, urldate = {2021-12-18}, abstract = {Intelligent virtual agents (IVAs) have been researched for years and recently many of these IVAs have become commercialized and widely used by many individuals as intelligent personal assistants. The majority of these IVAs are anthropomorphic, and many are developed to resemble real humans entirely. However, real humans do not interact only with other humans in the real world, and many benefit from interactions with non-human entities. A prime example is human interactions with animals, such as dogs. Humans and dogs share a historical bond that goes back thousands of years. In the past 30 years, there has been a great deal of research to understand the effects of human-dog interaction, with research findings pointing towards the physical, mental, and social benefits to humans when interacting with dogs. 
However, limitations such as allergies, stress on dogs, and hygiene issues restrict some needy individuals from receiving such benefits. More recently, advances in augmented and virtual reality technology provide opportunities for realizing virtual dogs and animals, allowing for their three-dimensional presence in the users' real physical environment or while users are immersed in virtual worlds. In this dissertation, I utilize the findings from human-dog interaction research and conduct a systematic literature review on embodied IVAs to define a research scope to understand virtual dogs' social and behavioral influences in augmented and virtual reality. I present the findings of this systematic literature review that informed the creation of the research scope and four human-subjects studies. Through these user studies, I found that virtual dogs bring about a sense of comfort and companionship for users in different contexts. In addition, their responsiveness plays an important role in enhancing users' quality of experience, and they can be effectively utilized as attention guidance mechanisms and social priming stimuli.}, keywords = {A-nn, F-NSF, F-ONR}, pubstate = {published}, tppubtype = {phdthesis} } @conference{Choudhary2021d, title = {Real-Time Magnification in Augmented Reality}, author = {Zubin Choudhary and Jesus Ugarte and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/09/SUI2021_AR_Magnification_DEMO.pdf}, year = {2021}, date = {2021-11-09}, urldate = {2021-11-09}, booktitle = {Proceedings of the 2021 ACM Spatial User Interaction}, pages = {1-2}, organization = {ACM}, series = {SUI 2021}, abstract = {With recent advances in augmented reality (AR) and computer vision it has become possible to magnify objects in real time in a user’s field of view. AR object magnification can have different purposes, such as enhancing human visual capabilities with the BigHead technique, which works by up-scaling human heads to communicate important facial cues over longer distances. For this purpose, we created a prototype with a 4K camera mounted on a HoloLens 2. In this demo, we present the BigHead technique and proof of concept AR testbed to magnify heads in real-time. Further, we describe how hand gestures are detected to control the scale and position of the magnified head. We discuss the technique and implementation, and propose future research directions.}, keywords = {A-gb, A-gfw, a-ju, A-zc, P-EICAR, SREAL}, pubstate = {published}, tppubtype = {conference} } @inproceedings{Flick2021, title = {Trade-offs in Augmented Reality User Interfaces for Controlling a Smart Environment}, author = {Connor D. Flick and Courtney J. Harris and Nikolas T. Yonkers and Nahal Norouzi and Austin Erickson and Zubin Choudhary and Matt Gottsacker and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/09/SUI2021_REU_Paper.pdf}, year = {2021}, date = {2021-11-09}, urldate = {2021-11-09}, booktitle = {In Symposium on Spatial User Interaction (SUI '21)}, pages = {1-11}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, abstract = {Smart devices and Internet of Things (IoT) technologies are replacing or being incorporated into traditional devices at a growing pace. The use of digital interfaces to interact with these devices has become a common occurrence in homes, work spaces, and various industries around the world. 
The most common interfaces for these connected devices focus on mobile apps or voice control via intelligent virtual assistants. However, with augmented reality (AR) becoming more popular and accessible among consumers, there are new opportunities for spatial user interfaces to seamlessly bridge the gap between digital and physical affordances. In this paper, we present a human-subject study evaluating and comparing four user interfaces for smart connected environments: gaze input, hand gestures, voice input, and a mobile app. We assessed participants’ user experience, usability, task load, completion time, and preferences. Our results show multiple trade-offs between these interfaces across these measures. In particular, we found that gaze input shows great potential for future use cases, while both gaze input and hand gestures suffer from limited familiarity among users, compared to voice input and mobile apps.}, keywords = {A-ae, A-gb, A-gfw, A-mg, A-nn, A-zc, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2021oj, title = {Autonomous systems human controller simulation}, author = {Gregory Welch and Gerd Bruder}, url = {https://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=11148671.PN.&OS=PN/11148671&RS=PN/11148671 https://sreal.ucf.edu/wp-content/uploads/2021/10/US11148671.pdf}, year = {2021}, date = {2021-10-19}, urldate = {2021-10-19}, number = {US 11,148,671}, location = {US}, keywords = {A-gb, A-gfw, F-NSF, SREAL}, pubstate = {published}, tppubtype = {patent} } @article{Norouzi2021, title = {Virtual Animals as Diegetic Attention Guidance Mechanisms in 360-Degree Experiences}, author = {Nahal Norouzi and Gerd Bruder and Austin Erickson and Kangsoo Kim and Jeremy Bailenson and Pamela J. Wisniewski and Charles E. Hughes and and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/IEEE_ISMAR_TVCG_2021.pdf}, year = {2021}, date = {2021-10-15}, urldate = {2021-10-15}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG) Special Issue on ISMAR 2021}, pages = {11}, abstract = {360-degree experiences such as cinematic virtual reality and 360-degree videos are becoming increasingly popular. In most examples, viewers can freely explore the content by changing their orientation. However, in some cases, this increased freedom may lead to viewers missing important events within such experiences. Thus, a recent research thrust has focused on studying mechanisms for guiding viewers’ attention while maintaining their sense of presence and fostering a positive user experience. One approach is the utilization of diegetic mechanisms, characterized by an internal consistency with respect to the narrative and the environment, for attention guidance. While such mechanisms are highly attractive, their uses and potential implementations are still not well understood. Additionally, acknowledging the user in 360-degree experiences has been linked to a higher sense of presence and connection. However, less is known when acknowledging behaviors are carried out by attention guiding mechanisms. To close these gaps, we conducted a within-subjects user study with five conditions of no guide and virtual arrows, birds, dogs, and dogs that acknowledge the user and the environment. 
Through our mixed-methods analysis, we found that the diegetic virtual animals resulted in a more positive user experience, all of which were at least as effective as the non-diegetic arrow in guiding users towards target events. The acknowledging dog received the most positive responses from our participants in terms of preference and user experience and significantly improved their sense of presence compared to the non-diegetic arrow. Lastly, three themes emerged from a qualitative analysis of our participants’ feedback, indicating the importance of the guide’s blending in, its acknowledging behavior, and participants’ positive associations as the main factors for our participants’ preferences.}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, P-ARA, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @conference{Gottsacker2021, title = {Diegetic Representations for Seamless Cross-Reality Interruptions}, author = {Matt Gottsacker and Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/ISMAR_2021_Paper__Interruptions_.pdf}, year = {2021}, date = {2021-10-15}, urldate = {2021-10-15}, booktitle = {Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR)}, pages = {10}, abstract = {Due to the closed design of modern virtual reality (VR) head-mounted displays (HMDs), users tend to lose awareness of their real-world surroundings. This is particularly challenging when another person in the same physical space needs to interrupt the VR user for a brief conversation. Such interruptions, e.g., tapping a VR user on the shoulder, can cause a disruptive break in presence (BIP), which affects their place and plausibility illusions, and may cause a drop in performance of their virtual activity. Recent findings related to the concept of diegesis, which denotes the internal consistency of an experience/story, suggest potential benefits of integrating registered virtual representations for physical interactors, especially when these appear internally consistent in VR. In this paper, we present a human-subject study we conducted to compare and evaluate five different diegetic and non-diegetic methods to facilitate cross-reality interruptions in a virtual office environment, where a user’s task was briefly interrupted by a physical person. We created a Cross-Reality Interaction Questionnaire (CRIQ) to capture the quality of the interaction from the VR user’s perspective. Our results show that the diegetic representations afforded the highest quality interactions, the highest place illusions, and caused the least disruption of the participants’ virtual experiences. We found reasonably high senses of co-presence with the partially and fully diegetic virtual representations. 
We discuss our findings as well as implications for practical applications that aim to leverage virtual representations to ease cross-reality interruptions.}, keywords = {A-gb, A-gfw, A-kk, A-mg, A-nn, F-NSF, F-ONR, P-ARA, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {conference} } @inproceedings{Choudhary2021bb, title = {[DC] Amplifying Realities: Gradual and Seamless Scaling of Visual and Auditory Stimuli in Extended Reality}, author = {Zubin Choudhary}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/ISMAR_DC_2021-ver2.pdf}, year = {2021}, date = {2021-10-08}, urldate = {2021-10-08}, pages = {4}, organization = {IEEE}, series = {ISMAR 2021}, abstract = {Existing literature in the field of extended reality has demonstrated that visual/auditory manipulations can change a person’s perception and behavior. For example, a mismatch between the physical self and the virtual self can have psychological and behavioral implications. There are many different approaches that can incur a perceptual change. An under-explored field of research is gradual and subtle manipulations, such as scaling the food one eats or scaling the heads of people one sees. In this position paper, I provide an overview of my prior PhD research focusing on means to gradually and seamlessly scale visual and auditory stimuli in extended reality, and investigations of the corresponding changes in human perception. I discuss future research topics and potential questions to be discussed at the ISMAR 2021 Doctoral Consortium.}, keywords = {A-zc, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Erickson2021b, title = {Augmenting Human Perception: Mediation of Extrasensory Signals in Head-Worn Augmented Reality}, author = {Austin Erickson and Dirk Reiners and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/ismar21d-sub1093-i6.pdf}, doi = {10.1109/ISMAR-Adjunct54149.2021.00085}, year = {2021}, date = {2021-10-04}, urldate = {2021-10-04}, booktitle = {Proceedings of the 2021 International Symposium on Mixed and Augmented Reality}, pages = {373-377}, publisher = {IEEE}, organization = {IEEE}, series = {ISMAR 2021}, abstract = {Mediated perception systems are systems in which sensory signals from the user's environment are mediated to the user's sensory channels. This type of system has great potential for enhancing the perception of the user via augmenting and/or diminishing incoming sensory signals according to the user's context, preferences, and perceptual capability. They also allow for extending the perception of the user to enable them to sense signals typically imperceivable to human senses, such as regions of the electromagnetic spectrum beyond visible light. However, in order to effectively mediate extrasensory data to the user, we need to understand when and how such data should be presented to them. In this paper, we present a prototype mediated perception system that maps extrasensory spatial data into visible light displayed within an augmented reality (AR) optical see-through head-mounted display (OST-HMD). Although the system is generalized such that it could support any spatial sensor data with minor modification, we chose to test the system using thermal infrared sensors. 
This system improves upon previous extended perception augmented reality prototypes in that it is capable of projecting egocentric sensor data in real time onto a 3D mesh generated by the OST-HMD that is representative of the user's environment. We present the lessons learned through iterative improvements to the system, as well as a performance analysis of the system and recommendations for future work.}, keywords = {A-ae, A-gb, A-gfw, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Choudhary2021b, title = {Scaled User Embodied Representations in Virtual and Augmented Reality}, author = {Zubin Choudhary and Gerd Bruder and Gregory F. Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2021/08/UIVR21-Submission-Final-1.pdf}, year = {2021}, date = {2021-09-08}, urldate = {2021-09-08}, publisher = {Workshop on User-Embodied Interaction in Virtual Reality (UIVR) 2021}, abstract = {Embodied user representations are important for a wide range of application domains involving human social interactions. While traditionally, human appearances were defined by the physics of the real world, we now have the means to go beyond such limitations with virtual, mixed, and augmented reality (VR/MR/AR) technologies. Different human appearances can have an impact on their perception and behavior with other users in social or collaborative environments. There is a growing literature about the impact of different user representations and behaviors on perception; however, investigating the impact of visual scaling of human body parts has so far received less attention from the research community. In this paper, we present and discuss our position that scaled user embodied representations in VR/MR/AR could lead to significant improvements for a range of use cases. We present our previous work on this topic, including the Big Head technique, through which virtual human heads can be scaled up or down. We motivate how it can improve the visibility of facial information, such as facial expressions and eye gaze, over long distances. Even when a human would be barely visible at a distance in the real world, this technique can recover lost embodied cues. We discuss perceptual effects of scaling human body parts and outline future research.}, keywords = {A-gb, A-gfw, A-zc, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Kim2021, title = {The Augmented Reality Internet of Things: Opportunities of Embodied Interactions in Transreality}, author = {Kangsoo Kim and Nahal Norouzi and Dongsik Jo and Gerd Bruder and and Gregory F. Welch}, editor = {A. Y. C. Nee and S. K. Ong}, year = {2021}, date = {2021-09-01}, urldate = {2021-09-01}, volume = {Handbook of Augmented Reality}, pages = {60}, publisher = {Springer }, abstract = {Human society is encountering a new wave of advancements related to smart connected technologies with the convergence of different traditionally separate fields, which can be characterized by a fusion of technologies that merge and tightly integrate the physical, digital, and biological spheres. In this new paradigm of convergence, all the physical and digital things will become more and more intelligent and connected to each other through the Internet, and the boundary between them will blur and become seamless. 
In particular, Augmented/Mixed Reality (AR/MR) combines virtual content with the real environment and is experiencing an unprecedented golden era along with dramatic technological achievements and increasing public interest. Together with advanced Artificial Intelligence (AI) and ubiquitous computing empowered by the Internet of Things/Everything (IoT/IoE) systems, AR can be our ultimate interface to interact with both digital (virtual) and physical (real) worlds while pervasively mediating and enriching our lives. In this chapter, we describe the concept of transreality that symbiotically connects the physical and the virtual worlds incorporating the aforementioned advanced technologies, and illustrate how such transreality environments can transform our activities in it, providing intelligent and intuitive interaction with the environment while exploring prior research literature in this domain. We also present the potential of virtually embodied interactions—e.g., employing virtual avatars and agents—in highly connected transreality spaces for enhancing human abilities and perception. Recent ongoing research focusing on the effects of embodied interaction are described and discussed in different aspects such as perceptual, cognitive, and social contexts. The chapter will end with discussions on potential research directions in the future and implications related to the user experience in transreality.}, keywords = {A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, SREAL}, pubstate = {forthcoming}, tppubtype = {inbook} } @patent{Welch2021bb, title = {Low Latency Tactile Telepresence}, author = {Gregory Welch and Ryan P. McMahan and Gerd Bruder}, url = {https://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&p=1&u=/netahtml/PTO/search-bool.html&r=1&f=G&l=50&co1=AND&d=PTXT&s1=11106357&OS=11106357&RS=11106357 https://sreal.ucf.edu/wp-content/uploads/2021/09/US11106357.pdf}, year = {2021}, date = {2021-08-31}, urldate = {2021-08-31}, number = {US 11,106,357}, abstract = {A system for remote tactile telepresence wherein an array of predefined touch gestures are abstracted into cataloged values and invoked either by pattern matching, by assigned name or visual indicia. A local and remote cache of the catalog reduces latency even for complicated gestures as only a gesture identifier needs to be transmitted to a haptic output destination. Additional embodiments translate gestures to different haptic device affordances. Tactile telepresence sessions are time-coded along with audiovisual content wherein playback is heard, seen, and felt. Another embodiment associates motion capture associated with the tactile profile so that remote, haptic recipients may see renderings of objects (e.g., hands) imparting vibrotactile sensations.}, keywords = {A-gb, A-gfw}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Erickson2020fb, title = {Beyond Visible Light: User and Societal Impacts of Egocentric Multispectral Vision}, author = {Austin Erickson and Kangsoo Kim and Gerd Bruder and Gregory F. Welch}, editor = {J. Y. C. Chen and G. 
Fragomeni}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/03/VAMR21-MSV.pdf}, doi = {10.1007/978-3-030-77599-5_23}, year = {2021}, date = {2021-07-24}, booktitle = {In Proceedings of the 2021 International Conference on Virtual, Augmented, and Mixed Reality}, number = {23}, pages = {19}, publisher = {Springer Nature}, address = {Washington, D.C.}, abstract = {Multi-spectral imagery is becoming popular for a wide range of application fields from agriculture to healthcare, mainly stemming from advances in consumer sensor and display technologies. Modern augmented reality (AR) head-mounted displays already combine a multitude of sensors and are well-suited for integration with additional sensors, such as cameras capturing information from different parts of the electromagnetic spectrum. In this paper, we describe a novel multi-spectral vision prototype based on the Microsoft HoloLens 1, which we extended with two thermal infrared (IR) cameras and two ultraviolet (UV) cameras. We performed an exploratory experiment, in which participants wore the prototype for an extended period of time and assessed its potential to augment our daily activities. Our report covers a discussion of qualitative insights on personal and societal uses of such novel multi-spectral vision systems, including their applicability for use during the COVID-19 pandemic.}, keywords = {A-ae, A-gb, A-gfw, A-kk, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Schubert2021mixed, title = {Mixed Reality Technology Capabilities for Combat-Casualty Handoff Training}, author = {Ryan Schubert and Gerd Bruder and Alyssa Tanaka and Francisco Guido-Sanz and Gregory F. Welch}, editor = {Jessie Y. C. Chen and Gino Fragomeni}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/07/Schubert2021_MixedRealityTechnologyCapabiliesForCombatCasualtyHandoffTraining-2.pdf}, doi = {10.1007/978-3-030-77599-5_47}, isbn = {978-3-030-77599-5}, year = {2021}, date = {2021-07-03}, booktitle = {International Conference on Human-Computer Interaction}, volume = {12770}, pages = {695-711}, publisher = {Springer International Publishing}, address = {Cham}, abstract = {Patient handoffs are a common, yet frequently error-prone occurrence, particularly in complex or challenging battlefield situations. Specific protocols exist to help simplify and reinforce conveying of necessary information during a combat-casualty handoff, and training can both reinforce correct behavior and protocol usage while providing relatively safe initial exposure to many of the complexities and variabilities of real handoff situations, before a patient’s life is at stake. Here we discuss a variety of mixed reality capabilities and training contexts that can manipulate many of these handoff complexities in a controlled manner. 
We finally discuss some future human-subject user study design considerations, including aspects of handoff training, evaluation or improvement of a specific handoff protocol, and how the same technology could be leveraged for operational use.}, keywords = {A-gb, A-gfw, A-rs, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2021, title = {Relative Pose Data Augmentation of Tracked Devices in Virtual Environments}, author = {Gregory Welch and Gerd Bruder}, url = {https://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&p=1&u=%2Fnetahtml%2FPTO%2Fsearch-bool.html&r=1&f=G&l=50&co1=AND&d=PTXT&s1=11042028&OS=11042028&RS=11042028 https://sreal.ucf.edu/wp-content/uploads/2021/09/Welch2021wa-002.pdf }, year = {2021}, date = {2021-06-22}, urldate = {2021-06-22}, number = {US 11,042,028 B1}, abstract = {This invention relates to tracking of user-worn and hand-held devices with respect to each other, in circumstances where there are two or more users interacting in the same shared space. It extends conventional global and body-relative approaches to "cooperatively" estimate the relative poses between all useful combinations of user-worn tracked devices such as HMDs and hand-held controllers worn (or held) by multiple users. For example, a first user's HMD estimates its absolute global pose in the coordinate frame associated with the externally-mounted devices, as well as its relative pose with respect to all other HMDs, hand-held controllers, and other user held/worn tracked devices in the environment. In this way, all HMDs (or as many as appropriate) are tracked with respect to each other, all HMDs are tracked with respect to all hand-held controllers, and all hand-held controllers are tracked with respect to all other hand-held controllers.}, keywords = {A-gb, A-gfw}, pubstate = {published}, tppubtype = {patent} } @misc{Anderson2021b, title = {Poster: Using XR Technology to Innovate Healthcare Education}, author = {Mindi Anderson and Frank Guido-Sanz and Desiree A. Díaz and Gregory F. Welch and Laura Gonzalez}, url = {https://www.inacsl.org/education/future-conferences/ https://sreal.ucf.edu/wp-content/uploads/2021/11/1559212-1621968939.pdf}, year = {2021}, date = {2021-06-20}, urldate = {2021-06-20}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {presentation} } @article{Anderson2021ve, title = {Exploration of a Capture and Analysis System to Identify What a Good Debriefer Looks Like}, author = {Mindi Anderson and Desiree Diaz and Steven Talbert and Laura Gonzalez and Syretta Spears and Melanie Keiffer and Frank Guido-Sanz and Helen Mills and Peggy Hill and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/09/Anderson2021ve.pdf}, year = {2021}, date = {2021-06-01}, journal = {Simulation in Healthcare}, volume = {16}, number = {3}, pages = {e46--e93}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Erickson2021, title = {An Extended Analysis on the Benefits of Dark Mode User Interfaces in Optical See-Through Head-Mounted Displays}, author = {Austin Erickson and Kangsoo Kim and Alexis Lambert and Gerd Bruder and Michael P. 
Browne and Greg Welch}, editor = {Victoria Interrante and Martin Giese}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/03/ACM_TAP2020_DarkMode1_5.pdf}, doi = {https://doi.org/10.1145/3456874}, year = {2021}, date = {2021-05-20}, journal = {ACM Transactions on Applied Perception}, volume = {18}, number = {3}, pages = {22}, abstract = {Light-on-dark color schemes, so-called “Dark Mode,” are becoming more and more popular over a wide range of display technologies and application fields. Many people who have to look at computer screens for hours at a time, such as computer programmers and computer graphics artists, indicate a preference for switching colors on a computer screen from dark text on a light background to light text on a dark background due to perceived advantages related to visual comfort and acuity, specifically when working in low-light environments. In this paper, we investigate the effects of dark mode color schemes in the field of optical see-through head-mounted displays (OST-HMDs), where the characteristic “additive” light model implies that bright graphics are visible but dark graphics are transparent. We describe two human-subject studies in which we evaluated a normal and inverted color mode in front of different physical backgrounds and different lighting conditions. Our results indicate that dark mode graphics displayed on the HoloLens have significant benefits for visual acuity, and usability, while user preferences depend largely on the lighting in the physical environment. We discuss the implications of these effects on user interfaces and applications.}, keywords = {A-ae, A-gb, A-gfw, A-kk, F-NSF, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Furuya2021, title = {Autonomous Vehicle Visual Embodiment for Pedestrian Interactions in Crossing Scenarios: Virtual Drivers in AVs for Pedestrian Crossing}, author = {Hiroshi Furuya and Kangsoo Kim and Gerd Bruder and Pamela J. Wisniewski and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/06/Furuya2021.pdf}, doi = {10.1145/3411763.3451626}, isbn = {9781450380959}, year = {2021}, date = {2021-05-08}, booktitle = {Extended Abstracts of the 2021 CHI Conference on Human Factors in Computing Systems}, number = {304}, pages = {7}, publisher = {Association for Computing Machinery}, address = {New York, NY, USA}, series = {CHI EA'21}, abstract = {This work presents a novel prototype autonomous vehicle (AV) human-machine interface (HMI) in virtual reality (VR) that utilizes a human-like visual embodiment in the driver's seat of an AV to communicate AV intent to pedestrians in a crosswalk scenario. There is currently a gap in understanding the use of virtual humans in AV HMIs for pedestrian crossing despite the demonstrated efficacy of human-like interfaces in improving human-machine relationships. We conduct a 3x2 within-subjects experiment in VR using our prototype to assess the effects of a virtual human visual embodiment AV HMI on pedestrian crossing behavior and experience. In the experiment participants walk across a virtual crosswalk in front of an AV. How long they took to decide to cross and how long it took for them to reach the other side were collected, in addition to their subjective preferences and feelings of safety. Of 26 participants, 25 preferred the condition with the most anthropomorphic features. 
An intermediate condition where a human-like virtual driver was present but did not exhibit any behaviors was least preferred and also had a significant effect on time to decide. This work contributes the first empirical work on using human-like visual embodiments for AV HMIs.}, keywords = {A-gb, A-gfw, A-hf, A-kk, F-NSF, F-ONR, P-ARA, P-EICAR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Anderson2021, title = {Augmented Reality in Nurse Practitioner Education: Using a Triage Scenario to Pilot Technology Usability and Effectiveness}, author = {Mindi Anderson and Frank Guido-Sanz and Desiree A. Díaz and Benjamin Lok and Jacob Stuart and Ilerioluwa Akinnola and Gregory Welch}, url = {https://www.sciencedirect.com/science/article/pii/S1876139921000098 https://sreal.ucf.edu/wp-content/uploads/2022/08/MindiAnderson2021qd.pdf }, issn = {1876-1399}, year = {2021}, date = {2021-05-01}, urldate = {2021-05-01}, journal = {Clinical Simulation in Nursing}, volume = {54}, pages = {105-112}, abstract = {Background Before implementation, simulations and new technologies should be piloted for usability and effectiveness. Simulationists and augmented reality (AR) researchers developed an augmented reality (AR) triage scenario for Nurse Practitioner (NP) students. Methods A mixed-method, exploratory, pilot study was carried out with NP students and other volunteers. Participants completed several tools to appraise the usability of the AR modality and the effectiveness of the scenario for learning. Open-ended questions were asked, and qualitative themes were obtained via content analysis. Results Mixed results were received by the twelve participants (8 students, 4 other volunteers). There were some issues with usability, and technical challenges occurred. The debriefing was found to be effective, and favorable comments were made on simulation realism. Further preparation for the content and technology, along with more practice, was inferred. Those with reported previous AR experience found the experience more effective. Conclusions Further improvements are needed with usability of the AR modality. Debriefing can be effective and the simulation realistic. Participants need further preparation in triaging and use of the technology, and more practice is needed. AR simulations have promise for use in NP education.}, keywords = {A-gfw, F-NSF, P-ARA, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Choudhary2021, title = {Revisiting Distance Perception with Scaled Embodied Cues in Social Virtual Reality}, author = {Zubin Choudhary and Matt Gottsacker and Kangsoo Kim and Ryan Schubert and Jeanine Stefanucci and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/04/C2593-Revisiting-Distance-Perception-with-Scaled-Embodied-Cues-in-Social-Virtual-Reality-7.pdf}, year = {2021}, date = {2021-04-01}, publisher = {IEEE Virtual Reality (VR), 2021}, abstract = {Previous research on distance estimation in virtual reality (VR) has well established that even for geometrically accurate virtual objects and environments users tend to systematically misestimate distances. This has implications for Social VR, where it introduces variables in personal space and proxemics behavior that change social behaviors compared to the real world. 
One yet unexplored factor is related to the trend that avatars’ embodied cues in Social VR are often scaled, e.g., by making one’s head bigger or one’s voice louder, to make social cues more pronounced over longer distances. In this paper we investigate how the perception of avatar distance is changed based on two means for scaling embodied social cues: visual head scale and verbal volume scale. We conducted a human subject study employing a mixed factorial design with two Social VR avatar representations (full-body, head-only) as a between factor as well as three visual head scales and three verbal volume scales (up-scaled, accurate, down-scaled) as within factors. For three distances from social to far-public space, we found that visual head scale had a significant effect on distance judgments and should be tuned for Social VR, while conflicting verbal volume scales did not, indicating that voices can be scaled in Social VR without immediate repercussions on spatial estimates. We discuss the interactions between the factors and implications for Social VR.}, keywords = {A-gb, A-gfw, A-kk, A-mg, A-rs, A-zc, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Norouzi2020d, title = {[Demo] Towards Interactive Virtual Dogs as a Pervasive Social Companion in Augmented Reality}, author = {Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Greg Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2020/12/029-030.pdf}, doi = {https://doi.org/10.2312/egve.20201283}, year = {2020}, date = {2020-12-04}, booktitle = {Proceedings of the combined International Conference on Artificial Reality & Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE).}, pages = {29-30}, abstract = {Pets and animal-assisted intervention sessions have been shown to be beneficial for humans' mental, social, and physical health. However, for specific populations, factors such as hygiene restrictions, allergies, and care and resource limitations reduce interaction opportunities. In parallel, understanding the capabilities of animals' technological representations, such as robotic and digital forms, has received considerable attention and has fueled the utilization of many of these technological representations. Additionally, recent advances in augmented reality technology have allowed for the realization of virtual animals with flexible appearances and behaviors to exist in the real world. In this demo, we present a companion virtual dog in augmented reality that aims to facilitate a range of interactions with populations, such as children and older adults. 
We discuss the potential benefits and limitations of such a companion and propose future use cases and research directions.}, note = {Best Demo Audience Choice Award}, keywords = {A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Erickson2020e, title = {A Review of Visual Perception Research in Optical See-Through Augmented Reality}, author = {Austin Erickson and Kangsoo Kim and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2021/05/DarkModeSurvey_ICAT_EGVE_2020.pdf}, doi = {10.2312/egve.20201256}, isbn = {978-3-03868-111-3}, year = {2020}, date = {2020-12-02}, booktitle = {In Proceedings of the International Conference on Artificial Reality and Telexistence & Eurographics Symposium on Virtual Environments}, pages = {8}, publisher = {The Eurographics Association}, organization = {The Eurographics Association}, abstract = {In the field of augmented reality (AR), many applications involve user interfaces (UIs) that overlay visual information over the user's view of their physical environment, e.g., as text, images, or three-dimensional scene elements. In this scope, optical see-through head-mounted displays (OST-HMDs) are particularly interesting as they typically use an additive light model, which denotes that the perception of the displayed virtual imagery is a composite of the lighting conditions of one's environment, the coloration of the objects that make up the virtual imagery, and the coloration of physical objects that lay behind them. While a large body of literature focused on investigating the visual perception of UI elements in immersive and flat panel displays, comparatively less effort has been spent on OST-HMDs. Due to the unique visual effects with OST-HMDs, we believe that it is important to review the field to understand the perceptual challenges, research trends, and future directions. In this paper, we present a systematic survey of literature based on the IEEE and ACM digital libraries, which explores users' perception of displaying text-based information on an OST-HMD, and aim to provide relevant design suggestions based on the meta-analysis results. We carefully review 14 key papers relevant to the visual perception research in OST-HMDs with UI elements, and present the current state of the research field, associated trends, noticeable research gaps in the literature, and recommendations for potential future research in this domain. }, keywords = {A-ae, A-gb, A-gfw, A-kk, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Norouzi2020c, title = {A Systematic Literature Review of Embodied Augmented Reality Agents in Head-Mounted Display Environments}, author = {Nahal Norouzi and Kangsoo Kim and Gerd Bruder and Austin Erickson and Zubin Choudhary and Yifan Li and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/11/IVC_ICAT_EGVE2020.pdf https://www.youtube.com/watch?v=IsX5q86pH4M}, year = {2020}, date = {2020-12-02}, urldate = {2020-12-02}, booktitle = {In Proceedings of the International Conference on Artificial Reality and Telexistence & Eurographics Symposium on Virtual Environments}, pages = {11}, abstract = {Embodied agents, i.e., computer-controlled characters, have proven useful for various applications across a multitude of display setups and modalities. 
While most traditional work focused on embodied agents presented on a screen or projector, and a growing number of works are focusing on agents in virtual reality, a comparatively small number of publications looked at such agents in augmented reality (AR). Such AR agents, specifically when using see-through head-mounted displays (HMDs) as the display medium, show multiple critical differences to other forms of agents, including their appearances, behaviors, and physical-virtual interactivity. Due to the unique challenges in this specific field, and due to the comparatively limited attention by the research community so far, we believe that it is important to map the field to understand the current trends, challenges, and future research. In this paper, we present a systematic review of the research performed on interactive, embodied AR agents using HMDs. Starting with 1261 broadly related papers, we conducted an in-depth review of 50 directly related papers from 2000 to 2020, focusing on papers that reported on user studies aiming to improve our understanding of interactive agents in AR HMD environments or their utilization in specific applications. We identified common research and application areas of AR agents through a structured iterative process, present research trends and gaps, and share insights on future directions.}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-nn, A-zc, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Zehtabian2020aav, title = {[Poster] An Automated Virtual Receptionist for Recognizing Visitors and Assuring Mask Wearing}, author = {Sharare Zehtabian and Siavash Khodadadeh and Kangsoo Kim and Gerd Bruder and Greg Welch and Ladislau Bölöni and Damla Turgut}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/12/VirtualReceptionist_Poster_ICAT_EGVE2020.pdf https://www.youtube.com/watch?v=r6bXNPn3lWU&feature=emb_logo}, doi = {10.2312/egve.20201273}, year = {2020}, date = {2020-12-02}, booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence & Eurographics Symposium on Virtual Environments}, pages = {9-10}, keywords = {A-gb, A-gfw, A-kk, F-NSF, P-ARA, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Erickson2020f, title = {[Demo] Dark/Light Mode Adaptation for Graphical User Interfaces on Near-Eye Displays}, author = {Austin Erickson and Kangsoo Kim and Gerd Bruder and Gregory F. Welch}, editor = {Kulik, Alexander and Sra, Misha and Kim, Kangsoo and Seo, Byung-Kuk}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/12/DarkmodeDEMO_ICAT_EGVE_2020-2.pdf https://www.youtube.com/watch?v=VJQTaYyofCw&t=61s }, doi = {https://doi.org/10.2312/egve.20201280}, isbn = {978-3-03868-112-0}, year = {2020}, date = {2020-12-02}, booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence & Eurographics Symposium on Virtual Environments}, pages = {23-24}, publisher = {The Eurographics Association}, organization = {The Eurographics Association}, abstract = {In the fields of augmented reality (AR) and virtual reality (VR), many applications involve user interfaces (UIs) to display various types of information to users. Such UIs are an important component that influences user experience and human factors in AR/VR because the users are directly facing and interacting with them to absorb the visualized information and manipulate the content. 
While consumers' interests in different forms of near-eye displays, such as AR/VR head-mounted displays (HMDs), are increasing, research on design standards for AR/VR UIs and human factors becomes increasingly interesting and timely. Although UI configurations, such as dark mode and light mode, have increased in popularity on other display types over the last several years, they have yet to make their way into AR/VR devices as built-in features. This demo showcases several use cases of dark mode and light mode UIs on AR/VR HMDs, and provides general guidelines for when they should be used to provide perceptual benefits to the user.}, keywords = {A-ae, A-gb, A-gfw, A-kk, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Mostofa2020aa, title = {[POSTER] Tactile Telepresence for Isolated Patients}, author = {Nafisa Mostofa and Indira Avendano and Ryan P. McMahan and Greg Welch}, editor = {Kulik, Alexander and Sra, Misha and Kim, Kangsoo and Seo, Byung-Kuk}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/12/ICAT-EGVE2020_PosterExtendedAbstract-TTIP.pdf https://www.youtube.com/watch?v=5Dmxzd58rOk&feature=emb_logo}, doi = {10.2312/egve.20201272}, isbn = {978-3-03868-112-0}, year = {2020}, date = {2020-12-02}, booktitle = {ICAT-EGVE 2020 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments - Posters and Demos}, journal = {ICAT-EGVE 2020 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments - Posters and Demos}, pages = {7--8}, publisher = {The Eurographics Association}, keywords = {A-gfw, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2020c, title = {Adaptive Visual Overlay Wound Simulation}, author = {Gregory Welch and Joseph LaViola Jr. and Francisco Guido-Sanz and Gerd Bruder and Mindi Anderson and Ryan Schubert}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/12/US10854098.pdf http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=/netahtml/PTO/srchnum.htm&r=1&f=G&l=50&s1=10,854,098}, year = {2020}, date = {2020-12-01}, number = {US 10,854,098 B1}, abstract = {A wound simulation unit is a physical device designed to help simulate a wound on an object (e.g., a human being or human surrogate such as a medical manikin) for instructing a trainee to learn or practice wound-related treatment skills. For the trainee, the simulation looks like a real wound when viewed using an Augmented Reality (AR) system. Responsive to a change in the anatomic state of the object (e.g., bending a knee or raising of an arm), not only the spatial location and orientation of the wound stays locked on the object in the AR system, but the characteristics of the wound change based on the physiologic logic of changing said anatomical state (e.g., greater or less blood flow, opening or closing of the wound).}, keywords = {A-gb, A-gfw, A-rs}, pubstate = {published}, tppubtype = {patent} } @inbook{Welch2020ab, title = {Kalman Filter}, author = {Gregory F. 
Welch}, editor = {Jim Rehg}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/12/Welch2020ae.pdf}, doi = {10.1007/978-3-030-03243-2_716-1}, isbn = {978-3-030-03243-2}, year = {2020}, date = {2020-12-01}, booktitle = {Computer Vision: A Reference Guide}, pages = {1--3}, publisher = {Springer International Publishing}, address = {Cham, Switzerland}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inbook} } @article{DeMelo2020rcl, title = {Reducing Cognitive Load and Improving Warfighter Problem Solving with Intelligent Virtual Assistants}, author = {Celso M. de Melo and Kangsoo Kim and Nahal Norouzi and Gerd Bruder and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/11/Melo2020aa-2.pdf}, doi = {10.3389/fpsyg.2020.554706}, year = {2020}, date = {2020-11-17}, journal = {Frontiers in Psychology}, volume = {11}, number = {554706}, pages = {1-12}, keywords = {A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Erickson2020d, title = {Exploring the Limitations of Environment Lighting on Optical See-Through Head-Mounted Displays}, author = {Austin Erickson and Kangsoo Kim and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/09/sui20a-sub1047-cam-i26-1.pdf https://youtu.be/3jJ-j35oO1I}, doi = {10.1145/3385959.3418445}, isbn = {9781450379434}, year = {2020}, date = {2020-10-31}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interaction }, pages = {1-8}, publisher = {ACM}, address = {New York, NY, USA}, organization = {Association for Computing Machinery}, series = {SUI '20}, abstract = {Due to the additive light model employed by most optical see-through head-mounted displays (OST-HMDs), they provide the best augmented reality (AR) views in dark environments, where the added AR light does not have to compete against existing real-world lighting. AR imagery displayed on such devices loses a significant amount of contrast in well-lit environments such as outdoors in direct sunlight. To compensate for this, OST-HMDs often use a tinted visor to reduce the amount of environment light that reaches the user’s eyes, which in turn results in a loss of contrast in the user’s physical environment. While these effects are well known and grounded in existing literature, formal measurements of the illuminance and contrast of modern OST-HMDs are currently missing. In this paper, we provide illuminance measurements for both the Microsoft HoloLens 1 and its successor the HoloLens 2 under varying environment lighting conditions ranging from 0 to 20,000 lux. We evaluate how environment lighting impacts the user by calculating contrast ratios between rendered black (transparent) and white imagery displayed under these conditions, and evaluate how the intensity of environment lighting is impacted by donning and using the HMD. Our results indicate the further need for refinement in the design of future OST-HMDs to optimize contrast in environments with illuminance values greater than or equal to those found in indoor working environments.}, keywords = {A-ae, A-gb, A-gfw, A-kk, F-NSF, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2020b, title = {Multisensory Wound Simulation}, author = {Gregory Welch and Joseph LaViola Jr. 
and Francisco Guido-Sanz and Gerd Bruder and Mindi Anderson and Ryan Schubert}, url = {http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=/netahtml/PTO/srchnum.htm&r=1&f=G&l=50&s1=10803761.PN.&OS=PN/10803761&RS=PN/10803761 https://sreal.ucf.edu/wp-content/uploads/2020/10/welch2020b.pdf}, year = {2020}, date = {2020-10-13}, number = {US 10,803,761 B2}, abstract = {A Tactile-Visual Wound (TVW) simulation unit is a physical device designed to help simulate a wound on a human being or human surrogate (e.g., a medical manikin) for instructing a trainee to learn or practice wound-related treatment skills. For the trainee, the TVW would feel (to the touch) like a real wound, look like a real wound when viewed using an Augmented Reality (AR) system, and appear to behave like a real wound when manipulated.}, keywords = {A-gb, A-gfw, A-rs, SREAL}, pubstate = {published}, tppubtype = {patent} } @article{Welch2020aa, title = {Augmented Reality Promises Mentally and Physically Stressful Training in Real Places}, author = {Gregory F Welch and Ryan Schubert and Gerd Bruder and Derrick P Stockdreher and Adam Casebolt}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/10/Welch2020aa.pdf}, year = {2020}, date = {2020-10-05}, journal = {IACLEA Campus Law Enforcement Journal}, volume = {50}, number = {5}, pages = {47--50}, keywords = {A-gb, A-gfw, A-rs, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Kim2020mia, title = {Multimodal interfaces and communication cues for remote collaboration}, author = {Seungwon Kim and Mark Billinghurst and Kangsoo Kim}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/10/Kim2020mia_Submission.pdf}, doi = {10.1007/s12193-020-00346-8}, issn = {1783-7677}, year = {2020}, date = {2020-10-03}, journal = {Journal on Multimodal User Interfaces}, volume = {14}, number = {4}, pages = {313-319}, note = {Special Issue Editorial}, keywords = {A-kk, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Lambert2020, title = {A Systematic Review of Ten Years of Research on Human Interaction with Social Robots}, author = {Alexis Lambert and Nahal Norouzi and Gerd Bruder and Greg Welch }, editor = {Constantine Stephanidis}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/08/8_25_2020_A-Systemat.pdf}, doi = {10.1080/10447318.2020.1801172}, year = {2020}, date = {2020-08-25}, journal = {International Journal of Human-Computer Interaction}, pages = {10}, abstract = {While research and development related to robotics has been going on for decades, the past decade in particular has seen a marked increase in related efforts, in part due to technological advances, increased technological accessibility and reliability, and increased commercial availability. What have come to be known as social robots are now being used to explore novel forms of human-robot interaction, to understand social norms, and to test expectations and human responses. To capture the contributions of these research efforts, identify the current trends, and future directions, we systematically review ten years of research in the field of social robotics between 2008 and 2018, which includes 86 publications with 70 user studies. We classify the past work based on the research topics and application areas, and provide information about the publications, their user studies, and the capabilities of the social robots utilized. We also discuss selected papers in detail and outline overall trends. 
Based on these findings, we identify some areas of potential future research.}, keywords = {A-gb, A-gfw, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Gonzalez2020aa, title = {Neurological Assessment Using a Physical-Virtual Patient (PVP)}, author = {Laura Gonzalez and Salam Daher and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/08/Gonzalez2020aa.pdf}, year = {2020}, date = {2020-08-12}, journal = {Simulation & Gaming}, pages = {1--17}, keywords = {A-gfw, A-sd, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {article} } @article{EricksonNorouzi2020, title = {Sharing gaze rays for visual target identification tasks in collaborative augmented reality}, author = {Austin Erickson and Nahal Norouzi and Kangsoo Kim and Ryan Schubert and Jonathan Jules and Joseph J. LaViola Jr. and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/07/Erickson2020_Article_SharingGazeRaysForVisualTarget.pdf}, doi = {https://doi.org/10.1007/s12193-020-00330-2}, issn = {1783-8738}, year = {2020}, date = {2020-07-09}, urldate = {2020-07-09}, journal = {Journal on Multimodal User Interfaces: Special Issue on Multimodal Interfaces and Communication Cues for Remote Collaboration}, volume = {14}, number = {4}, pages = {353-371}, abstract = {Augmented reality (AR) technologies provide a shared platform for users to collaborate in a physical context involving both real and virtual content. To enhance the quality of interaction between AR users, researchers have proposed augmenting users’ interpersonal space with embodied cues such as their gaze direction. While beneficial in achieving improved interpersonal spatial communication, such shared gaze environments suffer from multiple types of errors related to eye tracking and networking, that can reduce objective performance and subjective experience. In this paper, we present a human-subjects study to understand the impact of accuracy, precision, latency, and dropout based errors on users’ performance when using shared gaze cues to identify a target among a crowd of people. We simulated varying amounts of errors and the target distances and measured participants’ objective performance through their response time and error rate, and their subjective experience and cognitive load through questionnaires. We found significant differences suggesting that the simulated error levels had stronger effects on participants’ performance than target distance with accuracy and latency having a high impact on participants’ error rate. We also observed that participants assessed their own performance as lower than it objectively was. We discuss implications for practical shared gaze applications and we present a multi-user prototype system.}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-nn, A-rs, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Ghosh2020cot, title = {Circle of Trust: A New Approach to Mobile Online Safety for Teens and Parents}, author = {Arup Kumar Ghosh and Charles E. Hughes and Pamela J. 
Wisniewski }, doi = {10.1145/3313831.3376747}, year = {2020}, date = {2020-04-25}, booktitle = {Proceedings of CHI Conference on Human Factors in Computing Systems}, pages = {618:1-14}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Daher2020aa, title = {The Physical-Virtual Patient Simulator: A Physical Human Form with Virtual Appearance and Behavior}, author = {Salam Daher and Jason Hochreiter and Ryan Schubert and Laura Gonzalez and Juan Cendan and Mindi Anderson and Desiree A Diaz and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/06/Daher2020aa1.pdf https://journals.lww.com/simulationinhealthcare/Fulltext/2020/04000/The_Physical_Virtual_Patient_Simulator__A_Physical.9.aspx https://journals.lww.com/simulationinhealthcare/Fulltext/2020/06000/Erratum_to_the_Physical_Virtual_Patient_Simulator_.12.aspx}, doi = {10.1097/SIH.0000000000000409}, year = {2020}, date = {2020-04-01}, journal = {Simulation in Healthcare}, volume = {15}, number = {2}, pages = {115--121}, note = {see erratum at DOI: 10.1097/SIH.0000000000000481}, keywords = {A-gfw, A-jh, A-rs, A-sd, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Kim2020rtl, title = {Reducing Task Load with an Embodied Intelligent Virtual Assistant for Improved Performance in Collaborative Decision Making}, author = {Kangsoo Kim and Celso M. de Melo and Nahal Norouzi and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/02/IEEEVR2020_ARDesertSurvival.pdf https://www.youtube.com/watch?v=G_iZ_asjp3I&t=6s, YouTube Presentation}, doi = {10.1109/VR46266.2020.00-30}, year = {2020}, date = {2020-03-23}, booktitle = {Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR)}, pages = {529-538}, address = {Atlanta, Georgia}, keywords = {A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Erickson2020, title = {Effects of Dark Mode Graphics on Visual Acuity and Fatigue with Virtual Reality Head-Mounted Displays}, author = {Austin Erickson and Kangsoo Kim and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/02/VR2020_DarkMode2_0.pdf https://www.youtube.com/watch?v=wePUk0xTLA0&t=5s, YouTube Presentation}, doi = {10.1109/VR46266.2020.00-40}, year = {2020}, date = {2020-03-23}, urldate = {2020-03-23}, booktitle = {Proceedings of IEEE International Conference on Virtual Reality and 3D User Interfaces (IEEE VR)}, pages = {434-442}, address = {Atlanta, Georgia}, abstract = {Current virtual reality (VR) head-mounted displays (HMDs) are characterized by a low angular resolution that makes it difficult to make out details, leading to reduced legibility of text and increased visual fatigue. Light-on-dark graphics modes, so-called ``dark mode'' graphics, are becoming more and more popular over a wide range of display technologies, and have been correlated with increased visual comfort and acuity, specifically when working in low-light environments, which suggests that they might provide significant advantages for VR HMDs. In this paper, we present a human-subject study investigating the correlations between the color mode and the ambient lighting with respect to visual acuity and fatigue on VR HMDs. 
We compare two color schemes, characterized by light letters on a dark background (dark mode), or dark letters on a light background (light mode), and show that the dark background in dark mode provides a significant advantage in terms of reduced visual fatigue and increased visual acuity in dim virtual environments on current HMDs. Based on our results, we discuss guidelines for user interfaces and applications.}, keywords = {A-ae, A-gb, A-gfw, A-kk, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Erickson2020b, title = {Examining Whether Secondary Effects of Temperature-Associated Virtual Stimuli Influence Subjective Perception of Duration}, author = {Austin Erickson and Gerd Bruder and Pamela J. Wisniewski and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/02/TimePerception_VR2020.pdf https://www.youtube.com/watch?v=kG2M-cbjS3s&t=1s, YouTube Presentation}, doi = {10.1109/VR46266.2020.00-34}, year = {2020}, date = {2020-03-23}, urldate = {2020-03-23}, booktitle = {Proceedings of IEEE International Conference on Virtual Reality and 3D User Interfaces (IEEE VR)}, pages = {493-499}, address = {Atlanta, Georgia}, abstract = {Past work in augmented reality has shown that temperature-associated AR stimuli can induce warming and cooling sensations in the user, and prior work in psychology suggests that a person's body temperature can influence that person's sense of subjective perception of duration. In this paper, we present a user study to evaluate the relationship between temperature-associated virtual stimuli presented on an AR-HMD and the user's sense of subjective perception of duration and temperature. In particular, we investigate two independent variables: the apparent temperature of the virtual stimuli presented to the participant, which could be hot or cold, and the location of the stimuli, which could be in direct contact with the user, in indirect contact with the user, or both in direct and indirect contact simultaneously. We investigate how these variables affect the users' perception of duration and perception of body and environment temperature by having participants make prospective time estimations while observing the virtual stimulus and answering subjective questions regarding their body and environment temperatures. Our work confirms that temperature-associated virtual stimuli are capable of having significant effects on the users' perception of temperature, and highlights a possible limitation in the current augmented reality technology in that no secondary effects on the users' perception of duration were observed.}, keywords = {A-ae, A-gb, A-gfw, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Choudhary2020vbh, title = {Virtual Big Heads: Analysis of Human Perception and Comfort of Head Scales in Social Virtual Reality}, author = {Zubin Choudhary and Kangsoo Kim and Ryan Schubert and Gerd Bruder and Gregory F. 
Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/02/IEEEVR2020_BigHead.pdf https://www.youtube.com/watch?v=14289nufYf0, YouTube Presentation}, doi = {10.1109/VR46266.2020.00-41}, year = {2020}, date = {2020-03-23}, booktitle = {Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR)}, pages = {425-433}, address = {Atlanta, Georgia}, keywords = {A-gb, A-gfw, A-kk, A-zc, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @misc{Norouzi2020, title = {Augmented Reality Animals: Are They Our Future Companions?}, author = {Nahal Norouzi }, url = {https://sreal.ucf.edu/wp-content/uploads/2020/03/vr20c-sub1054-cam-i5.pdf}, year = {2020}, date = {2020-03-22}, abstract = {Previous research in the field of human-animal interaction has captured the multitude of benefits of this relationship on different aspects of human health. Existing limitations for accompanying pets/animals in some public spaces, allergies, and inability to provide adequate care for animals/pets limits the possible benefits of this relationship. However, the increased popularity of augmented reality and virtual reality devices and the introduction of new social behaviors since their utilization offers the opportunity of using such platforms for the realization of virtual animals and investigation of their influences on human perception and behavior. In this paper, two prior experiments are presented, which were designed to provide a better understanding of the requirements of virtual animals in augmented reality as companions and investigate some of their capabilities in the provision of support. Through these findings, future research directions are identified and discussed. }, note = {IEEE VR 2020 Doctoral Consortium}, keywords = {A-gb, A-gfw, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {presentation} } @misc{Kim2020dei, title = {[Tutorial] Developing Embodied Interactive Virtual Characters for Human-Subjects Studies}, author = {Kangsoo Kim and Nahal Norouzi and Austin Erickson}, url = {https://www.youtube.com/watch?v=UgT_-LVrQlc&list=PLMvKdHzC3SyacMfUj3qqd-pIjKmjtmwnz https://sreal.ucf.edu/ieee-vr-2020-tutorial-developing-embodied-interactive-virtual-characters-for-human-subjects-studies/}, year = {2020}, date = {2020-03-22}, urldate = {2020-03-22}, booktitle = {IEEE International Conference on Virtual Reality and 3D User Interfaces (IEEE VR)}, keywords = {A-ae, A-kk, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {presentation} } @inproceedings{Stuart2019asm, title = {[Poster] Applying Stress Management Techniques in Augmented Reality: Stress Induction and Reduction in Healthcare Providers during Virtual Triage Simulation}, author = {Jacob Stuart and Ileri Akinnola and Frank Guido-Sanz and Mindi Anderson and Desiree Diaz and Greg Welch and Benjamin Lok}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/07/09090656.pdf}, doi = {10.1109/VRW50115.2020.00037}, year = {2020}, date = {2020-03-22}, booktitle = {Proceedings of the IEEE Conference on Virtual Reality and 3D User Interfaces Abstracts and Workshops (VRW)}, pages = {171-172}, keywords = {A-gfw, F-NSF, P-ARA, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Thiamwong2020aa, title = {Assessing Fall Risk Appraisal Through Combined Physiological and Perceived Fall Risk Measures Using Innovative Technology}, author = {Ladda Thiamwong and Mary Lou Sole and Boon Peng and Gregory F. Welch and Helen J. 
Huang and Jeffrey R. Stout}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/08/Thiamwong2020aa.pdf}, year = {2020}, date = {2020-03-01}, journal = {Journal of Gerontological Nursing}, volume = {46}, number = {4}, pages = {41--47}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Erickson2020c, title = {Effects of Depth Information on Visual Target Identification Task Performance in Shared Gaze Environments}, author = {Austin Erickson and Nahal Norouzi and Kangsoo Kim and Joseph J. LaViola Jr. and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/02/shared_gaze_2_FINAL.pdf https://www.youtube.com/watch?v=JQO_iosY62Y&t=6s, YouTube Presentation}, doi = {10.1109/TVCG.2020.2973054}, issn = {1077-2626}, year = {2020}, date = {2020-02-13}, urldate = {2020-02-13}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {26}, number = {5}, pages = {1934-1944}, abstract = {Human gaze awareness is important for social and collaborative interactions. Recent technological advances in augmented reality (AR) displays and sensors provide us with the means to extend collaborative spaces with real-time dynamic AR indicators of one's gaze, for example via three-dimensional cursors or rays emanating from a partner's head. However, such gaze cues are only as useful as the quality of the underlying gaze estimation and the accuracy of the display mechanism. Depending on the type of the visualization, and the characteristics of the errors, AR gaze cues could either enhance or interfere with collaborations. In this paper, we present two human-subject studies in which we investigate the influence of angular and depth errors, target distance, and the type of gaze visualization on participants' performance and subjective evaluation during a collaborative task with a virtual human partner, where participants identified targets within a dynamically walking crowd. First, our results show that there is a significant difference in performance for the two gaze visualizations ray and cursor in conditions with simulated angular and depth errors: the ray visualization provided significantly faster response times and fewer errors compared to the cursor visualization. Second, our results show that under optimal conditions, among four different gaze visualization methods, a ray without depth information provides the worst performance and is rated lowest, while a combination of a ray and cursor with depth information is rated highest. 
We discuss the subjective and objective performance thresholds and provide guidelines for practitioners in this field.}, note = {Presented at IEEE VR 2020}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-nn, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @article{State2020aa, title = {The A-Desk: A Unified Workspace of the Future}, author = {Andrei State and Herman Towles and Tyler Johnson and Ryan Schubert and Brendan Walters and Greg Welch and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/01/State2020aa-1.pdf}, doi = {10.1109/MCG.2019.2951273}, issn = {1558-1756}, year = {2020}, date = {2020-01-01}, journal = {IEEE Computer Graphics and Applications}, volume = {40}, number = {1}, pages = {56-71}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Lee2020, title = {Mixed Reality Tabletop Gameplay: Social Interaction with a Virtual Human Capable of Physical Influence}, author = {Myungho Lee and Nahal Norouzi and Gerd Bruder and Pamela J. Wisniewski and Gregory F. Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2019/12/TVCG_Physical_Virtual_Table_2019.pdf}, doi = {10.1109/TVCG.2019.2959575}, issn = {1077-2626}, year = {2019}, date = {2019-12-18}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {24}, number = {8}, pages = {1-12}, abstract = {In this paper, we investigate the effects of the physical influence of a virtual human (VH) in the context of face-to-face interaction in a mixed reality environment. In Experiment 1, participants played a tabletop game with a VH, in which each player takes a turn and moves their own token along the designated spots on the shared table. We compared two conditions as follows: the VH in the virtual condition moves a virtual token that can only be seen through augmented reality (AR) glasses, while the VH in the physical condition moves a physical token as the participants do; therefore the VH’s token can be seen even in the periphery of the AR glasses. For the physical condition, we designed an actuator system underneath the table. The actuator moves a magnet under the table which then moves the VH’s physical token over the surface of the table. Our results indicate that participants felt higher co-presence with the VH in the physical condition, and participants assessed the VH as a more physical entity compared to the VH in the virtual condition. We further observed transference effects when participants attributed the VH’s ability to move physical objects to other elements in the real world. Also, the VH’s physical influence improved participants’ overall experience with the VH. In Experiment 2, we further looked into the question how the physical-virtual latency in movements affected the perceived plausibility of the VH’s interaction with the real world. Our results indicate that a slight temporal difference between the physical token reacting to the virtual hand’s movement increased the perceived realism and causality of the mixed reality interaction. We discuss potential explanations for the findings and implications for future shared mixed reality tabletop setups. 
}, keywords = {A-gb, A-gfw, A-ml, A-nn, F-ONR, P-EPICAR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Kim2019epc, title = {Effects of Patient Care Assistant Embodiment and Computer Mediation on User Experience}, author = {Kangsoo Kim and Nahal Norouzi and Tiffany Losekamp and Gerd Bruder and Mindi Anderson and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/11/AIVR2019_Caregiver.pdf}, doi = {10.1109/AIVR46125.2019.00013}, year = {2019}, date = {2019-12-09}, booktitle = {Proceedings of the IEEE International Conference on Artificial Intelligence & Virtual Reality (AIVR)}, pages = {17-24}, publisher = {IEEE}, abstract = {Providers of patient care environments are facing an increasing demand for technological solutions that can facilitate increased patient satisfaction while being cost-effective and practically feasible. Recent developments with respect to smart hospital room setups and smart home care environments have an immense potential to leverage advances in technologies such as Intelligent Virtual Agents, Internet of Things devices, and Augmented Reality to enable novel forms of patient interaction with caregivers and their environment. In this paper, we present a human-subjects study in which we compared four types of simulated patient care environments for a range of typical tasks. In particular, we tested two forms of caregiver mediation with a real person or a virtual agent, and we compared two forms of caregiver embodiment with disembodied verbal or embodied interaction. Our results show that, as expected, a real caregiver provides the optimal user experience, but an embodied virtual assistant is also a viable option for patient care environments, providing significantly higher social presence and engagement than voice-only interaction. We discuss the implications in the fields of patient care and digital assistants.}, keywords = {A-gb, A-gfw, A-kk, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Tanaka2019aa, title = {The Development and Implementation of Speech Understanding for Medical Handoff Training}, author = {Alyssa Tanaka and Brian Stensrud and Greg Welch and Francisco Guido-Sanz and Lee Sciarini and Henry Phillips}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/12/Tanaka2019aa.pdf}, year = {2019}, date = {2019-12-01}, booktitle = {Proceedings of 2019 Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC 2019)}, address = {Orlando, Florida, U.S.A.}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Erickson2019iic, title = {Is It Cold in Here or Is It Just Me? Analysis of Augmented Reality Temperature Visualization for Computer-Mediated Thermoception}, author = {Austin Erickson and Ryan Schubert and Kangsoo Kim and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/10/Erickson2019IIC.pdf}, doi = {10.1109/ISMAR.2019.00046}, isbn = {978-1-7281-4765-9}, year = {2019}, date = {2019-10-19}, urldate = {2019-10-19}, booktitle = {Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR)}, pages = {319-327}, publisher = {IEEE}, abstract = {Modern augmented reality (AR) head-mounted displays comprise a multitude of sensors that allow them to sense the environment around them.
We have extended these capabilities by mounting two heat-wavelength infrared cameras to a Microsoft HoloLens, facilitating the acquisition of thermal data and enabling stereoscopic thermal overlays in the user’s augmented view. The ability to visualize live thermal information opens several avenues of investigation on how that thermal awareness may affect a user’s thermoception. We present a human-subject study, in which we simulated different temperature shifts using either heat vision overlays or 3D AR virtual effects associated with thermal cause-effect relationships (e.g., flames burn and ice cools). We further investigated differences in estimated temperatures when the stimuli were applied to either the user’s body or their environment. Our analysis showed significant effects and first trends for the AR virtual effects and heat vision, respectively, on participants’ temperature estimates for their body and the environment though with different strengths and characteristics, which we discuss in this paper. }, keywords = {A-ae, A-gb, A-gfw, A-kk, A-rs, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Richards2019b, title = {Analysis of Peripheral Vision and Vibrotactile Feedback During Proximal Search Tasks with Dynamic Virtual Entities in Augmented Reality}, author = {Kendra Richards and Nikhil Mahalanobis and Kangsoo Kim and Ryan Schubert and Myungho Lee and Salam Daher and Nahal Norouzi and Jason Hochreiter and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/10/Richards2019b.pdf}, doi = {10.1145/3357251.3357585}, isbn = {978-1-4503-6975-6/19/10}, year = {2019}, date = {2019-10-19}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interaction (SUI)}, pages = {3:1-3:9}, publisher = {ACM}, abstract = {A primary goal of augmented reality (AR) is to seamlessly embed virtual content into a real environment. There are many factors that can affect the perceived physicality and co-presence of virtual entities, including the hardware capabilities, the fidelity of the virtual behaviors, and sensory feedback associated with the interactions. In this paper, we present a study investigating participants' perceptions and behaviors during a time-limited search task in close proximity with virtual entities in AR. In particular, we analyze the effects of (i) visual conflicts in the periphery of an optical see-through head-mounted display, a Microsoft HoloLens, (ii) overall lighting in the physical environment, and (iii) multimodal feedback based on vibrotactile transducers mounted on a physical platform. Our results show significant benefits of vibrotactile feedback and reduced peripheral lighting for spatial and social presence, and engagement. We discuss implications of these effects for AR applications.}, keywords = {A-gb, A-gfw, A-jh, A-kk, A-ml, A-nn, A-rs, A-sd, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Kim2019edm, title = {Effects of Dark Mode on Visual Fatigue and Acuity in Optical See-Through Head-Mounted Displays}, author = {Kangsoo Kim and Austin Erickson and Alexis Lambert and Gerd Bruder and Gregory F. 
Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/10/Kim2019edm.pdf}, doi = {10.1145/3357251.3357584}, isbn = {978-1-4503-6975-6/19/10}, year = {2019}, date = {2019-10-19}, urldate = {2019-10-19}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interaction (SUI)}, pages = {9:1-9:9}, publisher = {ACM}, abstract = {Light-on-dark color schemes, so-called "Dark Mode," are becoming more and more popular over a wide range of display technologies and application fields. Many people who have to look at computer screens for hours at a time, such as computer programmers and computer graphics artists, indicate a preference for switching colors on a computer screen from dark text on a light background to light text on a dark background due to perceived advantages related to visual comfort and acuity, specifically when working in low-light environments. In this paper, we investigate the effects of dark mode color schemes in the field of optical see-through head-mounted displays (OST-HMDs), where the characteristic "additive" light model implies that bright graphics are visible but dark graphics are transparent. We describe a human-subject study in which we evaluated a normal and inverted color mode in front of different physical backgrounds and among different lighting conditions. Our results show that dark mode graphics on OST-HMDs have significant benefits for visual acuity, fatigue, and usability, while user preferences depend largely on the lighting in the physical environment. We discuss the implications of these effects on user interfaces and applications.}, keywords = {A-ae, A-gb, A-gfw, A-kk, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Norouzi2019esg, title = {Effects of Shared Gaze Parameters on Visual Target Identification Task Performance in Augmented Reality}, author = {Nahal Norouzi and Austin Erickson and Kangsoo Kim and Ryan Schubert and Joseph J. LaViola Jr. and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/10/a12-norouzi.pdf}, doi = {10.1145/3357251.3357587}, isbn = {978-1-4503-6975-6/19/10}, year = {2019}, date = {2019-10-19}, urldate = {2019-10-19}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interaction (SUI)}, pages = {12:1-12:11}, publisher = {ACM}, abstract = {Augmented reality (AR) technologies provide a shared platform for users to collaborate in a physical context involving both real and virtual content. To enhance the quality of interaction between AR users, researchers have proposed augmenting users' interpersonal space with embodied cues such as their gaze direction. While beneficial in achieving improved interpersonal spatial communication, such shared gaze environments suffer from multiple types of errors related to eye tracking and networking, that can reduce objective performance and subjective experience. In this paper, we conducted a human-subject study to understand the impact of accuracy, precision, latency, and dropout based errors on users' performance when using shared gaze cues to identify a target among a crowd of people. We simulated varying amounts of errors and the target distances and measured participants' objective performance through their response time and error rate, and their subjective experience and cognitive load through questionnaires. 
We found some significant differences suggesting that the simulated error levels had stronger effects on participants' performance than target distance with accuracy and latency having a high impact on participants' error rate. We also observed that participants assessed their own performance as lower than it objectively was, and we discuss implications for practical shared gaze applications.}, note = {Best Paper Award}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-nn, A-rs, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @workshop{Norouzi2019f, title = {Investigating Augmented Reality Animals as Companions}, author = {Nahal Norouzi and Gerd Bruder and Jeremy Bailenson and Greg Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2019/10/ISMAR_Workshop_Paper__NN.pdf}, doi = {10.1109/ISMAR-Adjunct.2019.00104}, isbn = {978-1-7281-4765-9}, year = {2019}, date = {2019-10-18}, booktitle = {Adjunct proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR) Mixed/Augmented Reality and Mental Health Workshop, 2019}, pages = {371-374}, publisher = {IEEE}, abstract = {Human-animal interaction has been studied in a variety of settings and for a range of populations, with some findings pointing towards its benefits for physical, mental and social human health. Technological advances opened up new opportunities for researchers to replicate human-animal interactions with robotic and graphical animals, and to investigate human-animal relationships for different applications such as mental health and education. Although graphical animals have been studied in the past in the physical health and education domains, most of the time, their realizations were bound to computer screens, limiting their full potential, especially in terms of companionship and the provision of support. In this work, we describe past research efforts investigating influences of human-animal interaction on mental health and different realization of such animals. We discuss the idea that augmented reality could offer potential for human-animal interaction in terms of mental and social health, and propose several aspects of augmented reality animals that warrant further research for such interactions.}, keywords = {A-gb, A-gfw, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {workshop} } @inproceedings{Norouzi2019cb, title = {Walking Your Virtual Dog: Analysis of Awareness and Proxemics with Simulated Support Animals in Augmented Reality }, author = {Nahal Norouzi and Kangsoo Kim and Myungho Lee and Ryan Schubert and Austin Erickson and Jeremy Bailenson and Gerd Bruder and Greg Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2019/10/Final__AR_Animal_ISMAR.pdf}, doi = {10.1109/ISMAR.2019.00040}, isbn = {978-1-7281-4765-9}, year = {2019}, date = {2019-10-16}, urldate = {2019-10-16}, booktitle = {Proceedings of the IEEE International Symposium on Mixed and Augmented Reality (ISMAR), 2019}, pages = {253-264}, publisher = {IEEE}, abstract = {Domestic animals have a long history of enriching human lives physically and mentally by filling a variety of different roles, such as service animals, emotional support animals, companions, and pets. Despite this, technological realizations of such animals in augmented reality (AR) are largely underexplored in terms of their behavior and interactions as well as effects they might have on human users' perception or behavior. 
In this paper, we describe a simulated virtual companion animal, in the form of a dog, in a shared AR space. We investigated its effects on participants' perception and behavior, including locomotion related to proxemics, with respect to their AR dog and other real people in the environment. We conducted a 2 by 2 mixed factorial human-subject study, in which we varied (i) the AR dog's awareness and behavior with respect to other people in the physical environment and (ii) the awareness and behavior of those people with respect to the AR dog. Our results show that having an AR companion dog changes participants' locomotion behavior, proxemics, and social interaction with other people who can or cannot see the AR dog. We also show that the AR dog's simulated awareness and behaviors have an impact on participants' perception, including co-presence, animalism, perceived physicality, and dog's perceived awareness of the participant and environment. We discuss our findings and present insights and implications for the realization of effective AR animal companions.}, keywords = {A-ae, A-gb, A-gfw, A-kk, A-ml, A-nn, A-rs, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @misc{Erickson2019b, title = {Mediation of Multispectral Vision and its Impacts on User Perception}, author = {Austin Erickson}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/09/ISMAR_Doc_Consortium.pdf}, year = {2019}, date = {2019-10-15}, abstract = {As augmented reality head-mounted displays (HMDs), such as the Microsoft HoloLens and the Magic Leap One, become more accessible and ubiquitous, users are gaining access to a wealth of computer-mediated information that can be presented around them in 3D space. At the same time, camera and sensor costs and their physical footprint continue to decrease to the point where they can be easily integrated or mounted onto HMDs. Such cameras and sensors are capable of retrieving many different types of data from the user's environment, and when combined with such HMDs, can give users the ability to sense stimuli that are typically outside the range of human perception such as the thermal infrared and ultraviolet spectrums. Recent studies involving this combination of sensor and display technologies in the field of augmented reality have shown that the method of presentation of sensor data in different modalities can impact the user's perception of their environment. There are many different approaches by which sensor data can be conveyed visually or through other means that have yet to be explored. The work presented in this paper gives an overview of two human-subject studies, one involving perception of temperature using thermal infrared and augmented reality displays, and one involving multispectral vision which combines thermal infrared and ultraviolet vision into a single working implementation.
This prior work is discussed in detail along with potential avenues for future work.}, note = {IEEE ISMAR 2019 Doctoral Consortium}, keywords = {A-ae, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {presentation} } @article{Schmidt2019, title = {Effects of Virtual Agent and Object Representation on Experiencing Exhibited Artifacts }, author = {Susanne Schmidt and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/07/Schmidt2019.pdf}, doi = {10.1016/j.cag.2019.06.002}, year = {2019}, date = {2019-10-01}, journal = {Elsevier Computers and Graphics}, volume = {83}, pages = {1-10}, abstract = {With the emergence of speech-controlled virtual agents (VAs) in consumer devices such as Amazon’s Echo or Apple’s HomePod, we have seen a large public interest in related technologies. While most of the current interactive conversational VAs appear in the form of voice-only assistants, other representations showing, for example, a contextually related or generic humanoid body are possible. In our previous work, we analyzed the effectiveness of different forms of VAs in the context of a virtual reality (VR) exhibition space. We found positive evidence that agent embodiment induces a higher sense of spatial and social presence. The results also suggest that both embodied and thematically related audio-visual representations of VAs positively affect the overall user experience. We extend this work by further analyzing the effects of the physicality of the agent’s environment (i.e., virtual vs. real). The results of the follow-up study indicate some benefits of virtual environments, e.g., regarding user engagement and learning of visual facts. We also evaluate some interaction effects between the representations of the virtual agent and its surrounding and discuss implications on the design of exhibition spaces.}, keywords = {A-gb, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Dieker2019uvr, title = {Using Virtual Rehearsal in a Simulator to Impact the Performance of Science Teachers}, author = {Lisa Dieker and Carrie Straub and Michael Hynes and Charles Hughes and Caitlyn Bukathy and Taylor Bousfield and Samantha Mrstik}, doi = {10.4018/IJGCMS.2019100101}, year = {2019}, date = {2019-10-01}, journal = {International Journal of Gaming and Computer-Mediated Simulations}, volume = {11}, number = {4}, pages = {1-20}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @patent{US10410541, title = {Physical-Virtual Patient Bed System}, author = {Gregory Welch and Karen Aroian and Steven Talbert and Kelly Allred and Patricia Weinstein and Arjun Nagendran and Remo Pillat}, url = {https://patents.google.com/patent/US10410541B2/en?oq=10%2c410%2c541 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&p=1&u=%2Fnetahtml%2FPTO%2Fsearch-bool.html&r=1&f=G&l=50&co1=AND&d=PTXT&s1=10410541&OS=10410541&RS=10410541 https://sreal.ucf.edu/wp-content/uploads/2022/08/US10410541.pdf}, year = {2019}, date = {2019-09-10}, urldate = {2019-09-10}, number = {US10410541B2}, location = {US}, note = {Filed: 2017-06-02}, keywords = {A-gfw, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {patent} } @patent{Welch2019c, title = {Physical-Virtual Patient Bed System}, author = {Gregory Welch and Arjun Nagendran and Mary Lou Sole and Laura Gonzalez}, url = {https://patents.google.com/patent/US10380921B2/en?q=Physical-Virtual&q=Patient&q=Bed&q=System&oq=Physical-Virtual+Patient+Bed+System 
http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=10380921.PN.&OS=PN/10380921&RS=PN/10380921 https://sreal.ucf.edu/wp-content/uploads/2022/09/US10380921.pdf}, year = {2019}, date = {2019-08-13}, urldate = {2019-08-13}, number = {US10380921B2}, location = {3100 Technology Parkway}, abstract = {A patient simulation system for healthcare training is provided. The system includes one or more interchangeable shells comprising a physical anatomical model of at least a portion of a patient's body, the shell adapted to be illuminated from behind to provide one or more dynamic images viewable on the outer surface of the shells; a support system adapted to receive the shells via a mounting system, wherein the system comprises one or more image units adapted to render the one or more dynamic images viewable on the outer surface of the shells; one or more interface devices located about the patient shells to receive input and provide output; and one or more computing units in communication with the image units and interface devices, the computing units adapted to provide an interactive simulation for healthcare training.}, note = {Filed: 2015-07-01}, keywords = {A-gfw, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {patent} } @techreport{Welch2019b, title = {Anticipating Widespread Augmented Reality: Insights from the 2018 AR Visioning Workshop}, author = {Gregory F. Welch and Gerd Bruder and Peter Squire and Ryan Schubert}, url = {https://stars.library.ucf.edu/ucfscholar/786/ https://sreal.ucf.edu/wp-content/uploads/2019/08/Welch2019b-1.pdf}, year = {2019}, date = {2019-08-06}, issuetitle = {Faculty Scholarship and Creative Works}, number = {786}, institution = {University of Central Florida and Office of Naval Research}, abstract = {In August of 2018 a group of academic, government, and industry experts in the field of Augmented Reality gathered for four days to consider potential technological and societal issues and opportunities that could accompany a future where AR is pervasive in location and duration of use. This report is intended to summarize some of the most novel and potentially impactful insights and opportunities identified by the group. Our target audience includes AR researchers, government leaders, and thought leaders in general. 
It is our intent to share some compelling technological and societal questions that we believe are unique to AR, and to engender new thinking about the potentially impactful synergies associated with the convergence of AR and some other conventionally distinct areas of research.}, keywords = {A-gb, A-gfw, A-rs, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {techreport} } @phdthesis{Hochreiter2019, title = {Multi-touch Detection and Semantic Response on Non-parametric Rear-projection Surfaces}, author = {Jason Hochreiter}, url = {https://sreal.ucf.edu/wp-content/uploads/2020/08/Hochreiter2019-dissertation_Multi-touch-Detection-and-Semantic-Response-on-Non-parametric-Rea-1.pdf https://stars.library.ucf.edu/do/search/?q=jason%20hochreiter&start=0&context=7014507&facet=}, year = {2019}, date = {2019-08-01}, keywords = {A-jh, F-NSF, F-ONR, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {phdthesis} } @patent{Fuchs2019, title = {Methods, systems, and computer readable media for unified scene acquisition and pose tracking in a wearable display}, author = {Henry Fuchs and Mingsong Dou and Gregory Welch and Jan-Michael Frahm}, url = {https://patents.google.com/patent/US10365711B2/ http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=10,365,711.PN.&OS=PN/10,365,711&RS=PN/10,365,711 }, year = {2019}, date = {2019-07-30}, number = {US10365711B2}, abstract = {Methods, systems, and computer readable media for unified scene acquisition and pose tracking in a wearable display are disclosed. According to one aspect, a system for unified scene acquisition and pose tracking in a wearable display includes a wearable frame configured to be worn by a user. Mounted on the frame are: at least one sensor for acquiring scene information for a real scene proximate to the user, the scene information including images and depth information; a pose tracker for estimating the user's head pose based on the acquired scene information; a rendering unit for generating a virtual reality (VR) image based on the acquired scene information and estimated head pose; and at least one display for displaying to the user a combination of the generated VR image and the scene proximate to the user.}, note = {Filed: 2013-05-17}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {patent} } @incollection{Ingraham2019udp, title = {Using Digital Puppetry to Prepare Physicians to Address Non-Suicidal Self-Injury Among Teens}, author = {Kathleen Ingraham and Charles Hughes and Nicholas Westers and Lisa Dieker and Michael Hynes}, editor = {Magherita Antona and Constantine Stephanidis}, doi = {10.1007/978-3-030-23560-4}, year = {2019}, date = {2019-07-26}, booktitle = {Universal Access in Human-Computer Interaction. 
Theory, Methods and Tools}, journal = {Lecture Notes in Computer Science}, volume = {11572}, pages = {555-568}, publisher = {Springer}, address = {Cham, Switzerland}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {incollection} } @article{Kim2019blow, title = {Blowing in the Wind: Increasing Social Presence with a Virtual Human via Environmental Airflow Interaction in Mixed Reality}, author = {Kangsoo Kim and Ryan Schubert and Jason Hochreiter and Gerd Bruder and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/06/ELSEVIER_C_G2019_Special_BlowWindinMR_ICAT_EGVE2018_20190606_reduced.pdf}, doi = {10.1016/j.cag.2019.06.006}, year = {2019}, date = {2019-07-05}, journal = {Elsevier Computers and Graphics}, volume = {83}, number = {October 2019}, pages = {23-32}, abstract = {In this paper, we describe two human-subject studies in which we explored and investigated the effects of subtle multimodal interaction on social presence with a virtual human (VH) in mixed reality (MR). In the studies, participants interacted with a VH, which was co-located with them across a table, with two different platforms: a projection-based MR environment and an optical see-through head-mounted display (OST-HMD) based MR environment. While the two studies were not intended to be directly comparable, the second study with an OST-HMD was carefully designed based on the insights and lessons learned from the first projection-based study. For both studies, we compared two levels of gradually increased multimodal interaction: (i) virtual objects being affected by real airflow (e.g., as commonly experienced with fans during warm weather), and (ii) a VH showing awareness of this airflow. We hypothesized that our two levels of treatment would increase the sense of being together with the VH gradually, i.e., participants would report higher social presence with airflow influence than without it, and the social presence would be even higher when the VH showed awareness of the airflow. We observed an increased social presence in the second study when both physical–virtual interaction via airflow and VH awareness behaviors were present, but we observed no clear difference in participant-reported social presence with the VH in the first study. As the considered environmental factors are incidental to the direct interaction with the real human, i.e., they are not significant or necessary for the interaction task, they can provide a reasonably generalizable approach to increase social presence in HMD-based MR environments beyond the specific scenario and environment described here.}, keywords = {A-gb, A-gfw, A-kk, A-rs, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @conference{gonzalez_2019_vera, title = {Vera Real: Stroke Assessment Using a Physical Virtual Patient (PVP)}, author = {Laura Gonzalez and Salam Daher and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/07/INACSL-_Conference_VERA.pdf}, year = {2019}, date = {2019-06-21}, organization = {INACSL}, abstract = {Introduction: Simulation has revolutionized the way we teach and learn, and the pedagogy of simulation continues to mature. Mannequins have limitations such as their inability to exhibit emotions, idle movements, or interactive patient gaze. As a result, students struggle with suspension of disbelief and may be unable to relate to the “patient” authentically.
Physical virtual patients (PVP) are a new type of simulator that combines the physicality of mannequins with the richness of dynamic imagery such as blinking, smiling, and other facial expressions. The purpose of this study was to compare a traditional mannequin vs. a more realistic PVP head. The concept under consideration is realism and its influence on engagement and learning. Methods: The study used a pre-test, post-test, randomized between-subject design (N=59) with undergraduate nursing students. Students assessed an evolving stroke patient and completed post-simulation questions to evaluate engagement and sense of urgency. A knowledge pre-simulation test and post-simulation test were administered to evaluate learning. Results: Participants were more engaged with the PVP condition, which provoked a higher sense of urgency. There was a significant change between the pre-simulation and post-simulation tests, which supported increased learning for the PVP when compared to the mannequin. Discussion: This study demonstrated that increasing realism could increase engagement, which may result in a greater sense of urgency and learning. This PVP technology is a viable addition to mannequin-based simulation. Future work includes extending this technology to a full-body PVP.}, keywords = {A-gfw, A-sd}, pubstate = {published}, tppubtype = {conference} } @conference{inacsl_2019_arvr, title = {Virtual/Augmented Reality for Health Professions Education Symposium}, author = {Michelle Aebersold and Salam Daher and Cynthia Foronda and Jone Tiffany and Margaret Verkuyl}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/08/INACSL-ConferenceAR-VRSymposium.pdf}, year = {2019}, date = {2019-06-19}, organization = {INACSL}, keywords = {A-sd, SREAL}, pubstate = {published}, tppubtype = {conference} } @patent{Fuchs2019b, title = {Methods, systems, and computer readable media for improved illumination of spatial augmented reality objects}, author = {Henry Fuchs and Gregory Welch}, url = {https://patents.google.com/patent/US10321107B2/ http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=10321107.PN.&OS=PN/10321107&RS=PN/10321107 }, year = {2019}, date = {2019-06-11}, number = {US10321107B2}, abstract = {A system for illuminating a spatial augmented reality object includes an augmented reality object including a projection surface having a plurality of apertures formed through the projection surface. The system further includes a lenslets layer including a plurality of lenslets and conforming to curved regions of the projection surface for directing light through the apertures. The system further includes a camera for measuring ambient illumination in an environment of the projection surface. The system further includes a projected image illumination adjustment module for adjusting illumination of a captured video image. The system further includes a projector for projecting the illumination adjusted captured video image onto the projection surface via the lenslets layer and the apertures.}, note = {Filed: 2014-11-12}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {patent} } @article{Norouzi2019c, title = {Augmented Rotations in Virtual Reality for Users with a Reduced Range of Head Movement}, author = {Nahal Norouzi and Luke Bölling and Gerd Bruder and Gregory F.
Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/05/RATE2019_AugmentedRotations.pdf}, doi = {10.1177/2055668319841309}, year = {2019}, date = {2019-05-21}, journal = {Journal of Rehabilitation and Assistive Technologies Engineering}, volume = {6}, pages = {1-9}, abstract = {Introduction: A large body of research in the field of virtual reality (VR) is focused on making user interfaces more natural and intuitive by leveraging natural body movements to explore a virtual environment. For example, head-tracked user interfaces allow users to naturally look around a virtual space by moving their head. However, such approaches may not be appropriate for users with temporary or permanent limitations of their head movement. Methods: In this paper, we present techniques that allow these users to get virtual benefits from a reduced range of physical movements. Specifically, we describe two techniques that augment virtual rotations relative to physical movement thresholds. Results: We describe how each of the two techniques can be implemented with either a head tracker or an eye tracker, e.g., in cases when no physical head rotations are possible. Conclusions: We discuss their differences and limitations and we provide guidelines for the practical use of such augmented user interfaces.}, keywords = {A-gb, A-gfw, A-nn, F-NSF, F-ONR, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @misc{daher_2019_ncwit, title = {NCWIT Panel}, author = {Salam Daher and Veenadhari Kollipara}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/08/ncwit2019.png}, year = {2019}, date = {2019-05-05}, keywords = {A-sd, SREAL}, pubstate = {published}, tppubtype = {presentation} } @article{Blate2019aa, title = {Implementation and Evaluation of a 50kHz, 28μs Motion-to-Pose Latency Head Tracking Instrument}, author = {Alex Blate and Mary Whitton and Montek Singh and Greg Welch and Andrei State and Turner Whitted and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/07/Blate2019aa.pdf}, doi = {10.1109/TVCG.2019.2899233}, issn = {1077-2626}, year = {2019}, date = {2019-05-01}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {25}, number = {5}, pages = {1970-1980}, abstract = {This paper presents the implementation and evaluation of a 50,000-pose-sample-per-second, 6-degree-of-freedom optical head tracking instrument with motion-to-pose latency of 28μs and dynamic precision of 1-2 arcminutes. The instrument uses high-intensity infrared emitters and two duo-lateral photodiode-based optical sensors to triangulate pose. This instrument serves two purposes: it is the first step towards the requisite head tracking component in sub-100μs motion-to-photon latency optical see-through augmented reality (OST AR) head-mounted display (HMD) systems; and it enables new avenues of research into human visual perception – including measuring the thresholds for perceptible real-virtual displacement during head rotation and other human research requiring high-sample-rate motion tracking. The instrument’s tracking volume is limited to about 120×120×250mm but allows for the full range of natural head rotation and is sufficient for research involving seated users. We discuss how the instrument’s tracking volume is scalable in multiple ways and some of the trade-offs involved therein. Finally, we introduce a novel laser-pointer-based measurement technique for assessing the instrument’s tracking latency and repeatability.
We show that the instrument’s motion-to-pose latency is 28μs and that it is repeatable within 1-2 arcminutes at mean rotational velocities (yaw) in excess of 500°/sec.}, keywords = {A-gfw, F-ONR, P-EPICAR}, pubstate = {published}, tppubtype = {article} } @article{Miller2019, title = {Social Interaction in Augmented Reality}, author = {Mark Roman Miller and Hanseul Jun and Fernanda Herrera and Jacob Yu Villa and Greg Welch and Jeremy N. Bailenson}, url = {https://doi.org/10.1371/journal.pone.0216290 https://sreal.ucf.edu/wp-content/uploads/2019/05/Miller2019.pdf}, doi = {10.1371/journal.pone.0216290}, year = {2019}, date = {2019-05-01}, journal = {PLOS ONE}, volume = {14}, number = {5}, pages = {1-26}, publisher = {Public Library of Science}, abstract = {There have been decades of research on the usability and educational value of augmented reality. However, less is known about how augmented reality affects social interactions. The current paper presents three studies that test the social psychological effects of augmented reality. Study 1 examined participants’ task performance in the presence of embodied agents and replicated the typical pattern of social facilitation and inhibition. Participants performed a simple task better, but a hard task worse, in the presence of an agent compared to when participants completed the tasks alone. Study 2 examined nonverbal behavior. Participants met an agent sitting in one of two chairs and were asked to choose one of the chairs to sit on. Participants wearing the headset never sat directly on the agent when given the choice of two seats, and while approaching, most of the participants chose the rotation direction to avoid turning their heads away from the agent. A separate group of participants chose a seat after removing the augmented reality headset, and the majority still avoided the seat previously occupied by the agent. Study 3 examined the social costs of using an augmented reality headset with others who are not using a headset. Participants talked in dyads, and augmented reality users reported less social connection to their partner compared to those not using augmented reality. Overall, these studies provide evidence suggesting that task performance, nonverbal behavior, and social connectedness are significantly affected by the presence or absence of virtual content.}, keywords = {A-gfw, F-NSF, P-ARA, SREAL}, pubstate = {published}, tppubtype = {article} } @phdthesis{Lee2019, title = {Mediated Physicality: Inducing Illusory Physicality of Virtual Humans via Their Interactions with Physical Objects}, author = {Myungho Lee}, url = {https://stars.library.ucf.edu/cgi/viewcontent.cgi?article=7295&context=etd https://sreal.ucf.edu/wp-content/uploads/2019/06/Lee-2019-Dissertation.pdf}, year = {2019}, date = {2019-04-02}, school = {University of Central Florida}, abstract = {The term virtual human (VH) generally refers to a human-like entity comprised of computer graphics and/or a physical body. In the associated research literature, a VH can be further classified as an avatar—a human-controlled VH, or an agent—a computer-controlled VH. Because of the resemblance with humans, people naturally distinguish them from non-human objects, and often treat them in ways similar to real humans.
Sometimes people develop a sense of co-presence or social presence with the VH—a phenomenon that is often exploited for training simulations where the VH assumes the role of a human. Prior research associated with VHs has primarily focused on the realism of various visual traits, e.g., appearance, shape, and gestures. However, our sense of the presence of other humans is also affected by other physical sensations conveyed through nearby space or physical objects. For example, we humans can perceive the presence of other individuals via the sound or tactile sensation of approaching footsteps, or by the presence of complementary or opposing forces when carrying a physical box with another person. In my research, I exploit the fact that these sensations, when correlated with events in the shared space, affect one’s feeling of social/co-presence with another person. In this dissertation, I introduce novel methods for utilizing direct and indirect physical-virtual interactions with VHs to increase the sense of social/co-presence with the VHs—an approach I refer to as mediated physicality. I present results from controlled user studies, in various virtual environment settings, that support the idea that mediated physicality can increase a user’s sense of social/co-presence with the VH, and/or induced realistic social behavior. I discuss relationships to prior research, possible explanations for my findings, and areas for future research.}, keywords = {A-ml, F-ONR, P-EPICAR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {phdthesis} } @inproceedings{daher2019matching, title = {[POSTER] Matching vs. Non-Matching Visuals and Shape for Embodied Virtual Healthcare Agents}, author = {Salam Daher and Jason Hochreiter and Nahal Norouzi and Ryan Schubert and Gerd Bruder and Laura Gonzalez and Mindi Anderson and Desiree Diaz and Juan Cendan and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/03/IEEEVR2019_Poster_PVChildStudy.pdf}, year = {2019}, date = {2019-03-27}, publisher = {Proceedings of IEEE Virtual Reality (VR), 2019}, abstract = {Embodied virtual agents serving as patient simulators are widely used in medical training scenarios, ranging from physical patients to virtual patients presented via virtual and augmented reality technologies. Physical-virtual patients are a hybrid solution that combines the benefits of dynamic visuals integrated into a human-shaped physical form that can also present other cues, such as pulse, breathing sounds, and temperature. Sometimes in simulation the visuals and shape do not match. We carried out a human-participant study employing graduate nursing students in pediatric patient simulations comprising conditions associated with matching/non-matching of the visuals and shape.}, keywords = {A-gb, A-gfw, A-jh, A-nn, A-rs, A-sd, F-NSF, P-ARA, P-BED, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Lee2018b, title = {The Virtual Pole: Exploring Human Responses to Fear of Heights in Immersive Virtual Environments}, author = {Myungho Lee and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/06/Lee2019b.pdf}, doi = {10.20385/1860-2037/14.2017.6}, issn = {1860-2037}, year = {2019}, date = {2019-02-04}, journal = {Journal of Virtual Reality and Broadcasting}, volume = {14(2017)}, number = {6}, abstract = {Measuring how effective immersive virtual environments (IVEs) are in reproducing sensations as in similar situations in the real world is an important task for many application fields. 
In this paper, we present an experimental setup which we call the virtual pole, where we evaluated human responses to fear of heights. We conducted experiments where we analyzed correlations between subjective and physiological anxiety measures and the participant’s view direction. Our results show that the view direction plays an important role in subjective and physiological anxiety in an IVE due to the limited field of view, and that the subjective and physiological anxiety measures monotonically increase with the increasing height. In addition, we also found that participants recollected the virtual content they saw at the top more accurately compared to that at the medium height. We discuss the results and provide guidelines for simulations aimed at evoking fear of heights responses in IVEs.}, keywords = {A-gb, A-gfw, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @misc{daher_2019_otronicon, title = {Patient Simulators: the Past, Present, and Future}, author = {Salam Daher}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/08/otronicon_2019.pdf}, year = {2019}, date = {2019-01-21}, keywords = {A-sd, SREAL}, pubstate = {published}, tppubtype = {presentation} } @inbook{Norouzi2019, title = {A Systematic Review of the Convergence of Augmented Reality, Intelligent Virtual Agents, and the Internet of Things}, author = {Nahal Norouzi and Gerd Bruder and Brandon Belna and Stefanie Mutter and Damla Turgut and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/05/Norouzi-2019-IoT-AR-Final.pdf}, doi = {10.1007/978-3-030-04110-6_1}, isbn = {978-3-030-04109-0}, year = {2019}, date = {2019-01-10}, booktitle = {Artificial Intelligence in IoT}, pages = {37}, publisher = {Springer}, abstract = {In recent years we are beginning to see the convergence of three distinct research fields: Augmented Reality (AR), Intelligent Virtual Agents (IVAs), and the Internet of Things (IoT). Each of these has been classified as a disruptive technology for our society. Since their inception, the advancement of knowledge and development of technologies and systems in these fields was traditionally performed with limited input from each other. However, over the last years, we have seen research prototypes and commercial products being developed that cross the boundaries between these distinct fields to leverage their collective strengths. In this review paper, we resume the body of literature published at the intersections between each two of these fields, and we discuss a vision for the nexus of all three technologies.}, keywords = {A-gb, A-gfw, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inbook} } @article{Nilsson2018a, title = {15 Years of Research on Redirected Walking in Immersive Virtual Environments}, author = {Niels Christian Nilsson and Tabitha Peck and Gerd Bruder and Eric Hodgson and Stefania Serafin and Mary Whitton and Frank Steinicke and Evan Suma Rosenberg}, year = {2018}, date = {2018-12-01}, journal = {IEEE Computer Graphics and Applications}, volume = {38}, number = {2}, pages = {44-56}, abstract = {Virtual reality users wearing head-mounted displays can experience the illusion of walking in any direction for infinite distance while, in reality, they are walking a curvilinear path in physical space. This is accomplished by introducing unnoticeable rotations to the virtual environment-a technique called redirected walking. 
This paper gives an overview of the research that has been performed since redirected walking was first practically demonstrated 15 years ago.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @article{Janeh2018a, title = {Analyses of Gait Parameters of Younger and Older Adults during (Non-)Isometric Virtual Walking}, author = {Omar Janeh and Gerd Bruder and Frank Steinicke and Alessandro Gulberti and Monika Poetter-Nerger}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/01/Janeh2018a.pdf}, doi = {10.1109/TVCG.2017.2771520}, year = {2018}, date = {2018-12-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, volume = {24}, number = {10}, pages = {2663-2674}, abstract = {Understanding real walking in virtual environments (VEs) is important for immersive experiences, allowing users to move through VEs in the most natural way. Previous studies have shown that basic implementations of real walking in virtual spaces, in which head-tracked movements are mapped isometrically to a VE, are not estimated as entirely natural. Instead, users estimate a virtual walking velocity as more natural when it is slightly increased compared to the user's physical locomotion. However, these findings have been reported in most cases only for young persons, e.g., students, whereas older adults are clearly underrepresented in such studies. Recently, virtual reality (VR) has received significant public and media attention. Therefore, it appears reasonable to assume that people at different ages will have access to VR, and might use this technology more and more in application scenarios such as rehabilitation or training. To better understand how people at different ages walk and perceive locomotion in VR, we have performed a study to investigate the effects of (non-)isometric mappings between physical movements and virtual motions in the VE on the walking biomechanics across generations, i.e., younger and older adults. Three primary domains (pace, base of support and phase) of spatio-temporal parameters were identified to evaluate gait performance. The results show that the older adults walked very similarly in the real environment and the VE in the pace and phasic domains, which differs from results found in younger adults. In contrast, the results indicate differences in terms of base of support domain parameters for both groups while walking within a VE and the real world. For non-isometric mappings, we found in both younger and older adults an increased divergence of gait parameters in all domains correlating with the up- or down-scaled velocity of visual self-motion feedback. The results provide important insights into the design of future VR applications for older adults in domains ranging from medicine and psychology to rehabilitation.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{Lee2018ac, title = {The Physical-virtual Table: Exploring the Effects of a Virtual Human's Physical Influence on Social Interaction}, author = {Myungho Lee and Nahal Norouzi and Gerd Bruder and Pamela J. Wisniewski and Gregory F.
Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Lee2018ab.pdf}, doi = {10.1145/3281505.3281533}, isbn = {978-1-4503-6086-9}, year = {2018}, date = {2018-11-28}, booktitle = {Proceedings of the 24th ACM Symposium on Virtual Reality Software and Technology}, journal = {Proceedings of the 24th ACM Symposium on Virtual Reality Software and Technology}, pages = {25:1--25:11}, publisher = {ACM}, address = {New York, NY, USA}, series = {VRST '18}, abstract = {In this paper, we investigate the effects of the physical influence of a virtual human (VH) in the context of face-to-face interaction in augmented reality (AR). In our study, participants played a tabletop game with a VH, in which each player takes a turn and moves their own token along the designated spots on the shared table. We compared two conditions as follows: the VH in the virtual condition moves a virtual token that can only be seen through AR glasses, while the VH in the physical condition moves a physical token as the participants do; therefore the VH's token can be seen even in the periphery of the AR glasses. For the physical condition, we designed an actuator system underneath the table. The actuator moves a magnet under the table which then moves the VH's physical token over the surface of the table. Our results indicate that participants felt higher co-presence with the VH in the physical condition, and participants assessed the VH as a more physical entity compared to the VH in the virtual condition. We further observed transference effects when participants attributed the VH's ability to move physical objects to other elements in the real world. Also, the VH's physical influence improved participants' overall experience with the VH. We discuss potential explanations for the findings and implications for future shared AR tabletop setups.}, note = {Best Paper Award}, keywords = {A-gb, A-gfw, A-ml, A-nn, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @phdthesis{Kim2018Thesis, title = {Environmental Physical–Virtual Interaction to Improve Social Presence of a Virtual Human in Mixed Reality}, author = {Kangsoo Kim}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/KangsooKIM_PhD_Dissertation_20181119.pdf}, year = {2018}, date = {2018-11-21}, school = {The University of Central Florida}, abstract = {Interactive Virtual Humans (VHs) are increasingly used to replace or assist real humans in various applications, e.g., military and medical training, education, or entertainment. In most VH research, the perceived social presence with a VH, which denotes the user's sense of being socially connected or co-located with the VH, is the decisive factor in evaluating the social influence of the VH—a phenomenon where human users' emotions, opinions, or behaviors are affected by the VH. The purpose of this dissertation is to develop new knowledge about how characteristics and behaviors of a VH in a Mixed Reality (MR) environment can affect the perception of and resulting behavior with the VH, and to find effective and efficient ways to improve the quality and performance of social interactions with VHs. Important issues and challenges in real–virtual human interactions in MR, e.g., lack of physical–virtual interaction, are identified and discussed through several user studies incorporating interactions with VH systems.
In the studies, different features of VHs are prototyped and evaluated, such as a VH's ability to be aware of and influence the surrounding physical environment, while measuring objective behavioral data as well as collecting subjective responses from the participants. The results from the studies support the idea that the VH's awareness and influence of the physical environment can improve not only the perceived social presence with the VH, but also the trustworthiness of the VH within a social context. The findings will contribute towards designing more influential VHs that can benefit a wide range of simulation and training applications for which a high level of social realism is important, and that can be more easily incorporated into our daily lives as social companions, providing reliable relationships and convenience in assisting with daily tasks.}, keywords = {A-kk, F-ONR, P-EPICAR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {phdthesis} } @inproceedings{Kim2018c, title = {Blowing in the Wind: Increasing Copresence with a Virtual Human via Airflow Influence in Augmented Reality}, author = {Kangsoo Kim and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/10/Kim_Airflow_ICAT_EGVE2018.pdf}, doi = {10.2312/egve.20181332}, year = {2018}, date = {2018-11-07}, booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2018), Limassol, Cyprus, November 7–9, 2018}, pages = {183-190}, abstract = {In a social context where two or more interlocutors interact with each other in the same space, one's sense of copresence with the others is an important factor for the quality of communication and engagement in the interaction. Although augmented reality (AR) technology enables the superposition of virtual humans (VHs) as interlocutors in the real world, the resulting sense of copresence is usually far lower than with a real human interlocutor. In this paper, we describe a human-subject study in which we explored and investigated the effects that subtle multi-modal interaction between the virtual environment and the real world, where a VH and human participants were co-located, can have on copresence. We compared two levels of gradually increased multi-modal interaction: (i) virtual objects being affected by real airflow as commonly experienced with fans in summer, and (ii) a VH showing awareness of this airflow. We chose airflow as one example of an environmental factor that can noticeably affect both the real and virtual worlds, and also cause subtle responses in interlocutors. We hypothesized that our two levels of treatment would increase the sense of being together with the VH gradually, i.e., participants would report higher copresence with airflow influence than without it, and the copresence would be even higher when the VH shows awareness of the airflow. The statistical analysis with the participant-reported copresence scores showed that there was an improvement of the perceived copresence with the VH when both the physical–virtual interactivity via airflow and the VH's awareness behaviors were present together.
As the considered environmental factors are directed at the VH, i.e., they are not part of the direct interaction with the real human, they can provide a reasonably generalizable approach to support copresence in AR beyond the particular use case in the present experiment.}, note = {Honorable Mention Award}, keywords = {A-gb, A-gfw, A-kk, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Welch2018ab, title = {A Novel Approach for Cooperative Motion Capture (COMOCAP)}, author = {Greg Welch and Tianren Wang and Gary Bishop and Gerd Bruder}, editor = {G. Bruder and S. Cobb and S. Yoshimoto}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Welch2018ab.pdf}, year = {2018}, date = {2018-11-07}, booktitle = {ICAT-EGVE 2018 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments}, publisher = {The Eurographics Association}, address = {Limassol, Cyprus}, abstract = {Conventional motion capture (MOCAP) systems, e.g., optical systems, typically perform well for one person, but less so for multiple people in close proximity. Measurement quality can decline with distance, and even drop out as source/sensor components are occluded by nearby people. Furthermore, conventional optical MOCAP systems estimate body posture using a global estimation approach employing cameras that are fixed in the environment, typically at a distance such that one person or object can easily occlude another, and the relative error between tracked objects in the scene can increase as they move farther from the cameras and/or closer to each other. Body-relative tracking approaches use body-worn sensors and/or sources to track limbs with respect to the head or torso, for example, taking advantage of the proximity of limbs to the body. We present a novel approach to MOCAP that combines and extends conventional global and body-relative approaches by distributing both sensing and active signaling over each person’s body to facilitate body-relative (intra-user) MOCAP for one person and body-body (inter-user) MOCAP for multiple people, in an approach we call cooperative motion capture (COMOCAP). We support the validity of the approach with simulation results from a system comprised of acoustic transceivers (receiver-transmitter units) that provide inter-transceiver range measurements. Optical, magnetic, and other types of transceivers could also be used. Our simulations demonstrate the advantages of this approach to effectively improve accuracy and robustness to occlusions in situations of close proximity between multiple persons.}, keywords = {A-gb, A-gfw, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Schubert2018, title = {Adaptive filtering of physical-virtual artifacts for synthetic animatronics}, author = {Ryan Schubert and Gerd Bruder and Greg Welch}, editor = {G. Bruder and S. Cobb and S.
Yoshimoto}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/01/Schubert2018.pdf}, year = {2018}, date = {2018-11-07}, booktitle = {ICAT-EGVE 2018 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments, Limassol, Cyprus, November 7-9 2018}, keywords = {A-gb, A-gfw, A-rs, F-NSF, F-ONR, P-ARA, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Schmidt2018a, title = {Effects of Embodiment on Generic and Content-Specific Intelligent Virtual Agents as Exhibition Guides}, author = {Susanne Schmidt and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/01/Schmidt2018a.pdf}, doi = {10.2312/egve.20181309}, year = {2018}, date = {2018-11-07}, booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE 2018), Limassol, Cyprus, November 7–9, 2018}, pages = {13-20}, abstract = {Intelligent Virtual Agents (IVAs) received enormous attention in recent years due to significant improvements in voice communication technologies and the convergence of different research fields such as Machine Learning, Internet of Things, and Virtual Reality (VR). Interactive conversational IVAs can appear in different forms such as voice-only or with embodied audio-visual representations showing, for example, human-like contextually related or generic three-dimensional bodies. In this paper, we analyzed the benefits of different forms of virtual agents in the context of a VR exhibition space. Our results suggest positive evidence showing large benefits of both embodied and thematically related audio-visual representations of IVAs. We discuss implications and suggestions for content developers to design believable virtual agents in the context of such installations.}, note = {Best Paper Award}, keywords = {A-gb, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Norouzi2018c, title = {A Systematic Survey of 15 Years of User Studies Published in the Intelligent Virtual Agents Conference}, author = {Nahal Norouzi and Kangsoo Kim and Jason Hochreiter and Myungho Lee and Salam Daher and Gerd Bruder and Gregory Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/p17-norouzi-2.pdf}, doi = {10.1145/3267851.3267901}, isbn = {978-1-4503-6013-5/18/11}, year = {2018}, date = {2018-11-05}, booktitle = {IVA '18 Proceedings of the 18th International Conference on Intelligent Virtual Agents}, pages = {17-22}, publisher = {ACM}, organization = {ACM}, abstract = {The field of intelligent virtual agents (IVAs) has evolved immensely over the past 15 years, introducing new application opportunities in areas such as training, health care, and virtual assistants. In this survey paper, we provide a systematic review of the most influential user studies published in the IVA conference from 2001 to 2015 focusing on IVA development, human perception, and interactions. A total of 247 papers with 276 user studies have been classified and reviewed based on their contributions and impact. We identify the different areas of research and provide a summary of the papers with the highest impact. 
With the trends of past user studies and the current state of technology, we provide insights into future trends and research challenges.}, keywords = {A-gb, A-gfw, A-jh, A-kk, A-nn, A-sd, F-NSF, F-ONR, P-ARA, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{daher2018physical, title = {Physical-Virtual Agents for Healthcare Simulation}, author = {Salam Daher and Jason Hochreiter and Nahal Norouzi and Laura Gonzalez and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/10/IVA2018_StrokeStudy_CameraReady_Editor_20180911_1608.pdf}, year = {2018}, date = {2018-11-04}, booktitle = {Proceedings of IVA 2018, November 5-8, 2018, Sydney, NSW, Australia}, publisher = {ACM}, abstract = {Conventional Intelligent Virtual Agents (IVAs) focus primarily on the visual and auditory channels for both the agent and the interacting human: the agent displays a visual appearance and speech as output, while processing the human’s verbal and non-verbal behavior as input. However, some interactions, particularly those between a patient and healthcare provider, inherently include tactile components. We introduce an Intelligent Physical-Virtual Agent (IPVA) head that occupies an appropriate physical volume; can be touched; and via human-in-the-loop control can change appearance, listen, speak, and react physiologically in response to human behavior. Compared to a traditional IVA, it provides a physical affordance, allowing for more realistic and compelling human-agent interactions. In a user study focusing on neurological assessment of a simulated patient showing stroke symptoms, we compared the IPVA head with a high-fidelity touch-aware mannequin that has a static appearance. Various measures of the human subjects indicated greater attention, affinity for, and presence with the IPVA patient, all factors that can improve healthcare training.}, keywords = {A-gb, A-gfw, A-jh, A-nn, A-sd, F-NSF, P-ARA, P-BED, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Jamshidi2018aa, title = {Mine the Gap: Gap Estimation and Contact Detection Information via Adjacent Surface Observation}, author = {Yazdan Jamshidi and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Jamshidi2018.pdf}, doi = {10.1145/3243250.3243260}, year = {2018}, date = {2018-10-24}, urldate = {2018-11-08}, booktitle = {Proceedings of the International Conference on Pattern Recognition and Artificial Intelligence}, pages = {54--58}, publisher = {ACM}, address = {New York, NY, USA}, series = {PRAI 2018}, keywords = {A-gfw, F-NSF, F-ONR, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Haesler2018, title = {[POSTER] Seeing is Believing: Improving the Perceived Trust in Visually Embodied Alexa in Augmented Reality}, author = {Steffen Haesler and Kangsoo Kim and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/08/Haesler2018.pdf}, doi = {10.1109/ISMAR-Adjunct.2018.00067}, year = {2018}, date = {2018-10-16}, booktitle = {Proceedings of the 17th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2018), Munich, Germany, October 16–20, 2018}, abstract = {Voice-activated Intelligent Virtual Assistants (IVAs) such as Amazon Alexa offer a natural and realistic form of interaction that pursues the level of social interaction among real humans.
The user experience with such technologies depends to a large degree on the perceived trust in and reliability of the IVA. In this poster, we explore the effects of a three-dimensional embodied representation of Amazon Alexa in Augmented Reality (AR) on the user’s perceived trust in her being able to control Internet of Things (IoT) devices in a smart home environment. We present a preliminary study and discuss the potential of positive effects in perceived trust due to the embodied representation compared to a voice-only condition.}, keywords = {A-gb, A-gfw, A-kk, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Kim2018a, title = {Does a Digital Assistant Need a Body? The Influence of Visual Embodiment and Social Behavior on the Perception of Intelligent Virtual Agents in AR}, author = {Kangsoo Kim and Luke Boelling and Steffen Haesler and Jeremy N. Bailenson and Gerd Bruder and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/08/Kim2018a.pdf}, doi = {10.1109/ISMAR.2018.00039}, year = {2018}, date = {2018-10-16}, booktitle = {Proceedings of the 17th IEEE International Symposium on Mixed and Augmented Reality (ISMAR 2018), Munich, Germany, October 16–20, 2018}, abstract = {Intelligent Virtual Agents (IVAs) are becoming part of our everyday life, thanks to artificial intelligence technology and Internet of Things devices. For example, users can control their connected home appliances through natural voice commands to the IVA. However, most current-state commercial IVAs, such as Amazon Alexa, mainly focus on voice commands and voice feedback, and lack the ability to provide non-verbal cues which are an important part of social interaction. Augmented Reality (AR) has the potential to overcome this challenge by providing a visual embodiment of the IVA. In this paper we investigate how visual embodiment and social behaviors influence the perception of the IVA. We hypothesize that a user's confidence in an IVA's ability to perform tasks is improved when imbuing the agent with a human body and social behaviors compared to the agent solely depending on voice feedback. In other words, an agent's embodied gesture and locomotion behavior exhibiting awareness of the surrounding real world or exerting influence over the environment can improve the perceived social presence with and confidence in the agent. We present a human-subject study, in which we evaluated the hypothesis and compared different forms of IVAs with speech, gesturing, and locomotion behaviors in an interactive AR scenario. The results show support for the hypothesis with measures of confidence, trust, and social presence. We discuss implications for future developments in the field of IVAs.}, keywords = {A-gb, A-gfw, A-kk, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Thomas2018aa, title = {Situated Analytics }, author = {Bruce H. Thomas and Gregory F. Welch and Pierre Dragicevic and Niklas Elmqvist and Pourang Irani and Yvonne Jansen and Dieter Schmalstieg and Aurélien Tabard and Neven A. M. ElSayed and Ross T. Smith and Wesley Willett}, editor = {Kim Marriott and Falk Schreiber and Tim Dwyer and Karsten Klein and Nathalie Henry Riche and Takayuki Itoh and Wolfgang Stuerzlinger and Bruce H. 
Thomas}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/10/Thomas2018aa.pdf}, doi = {10.1007/978-3-030-01388-2_7}, isbn = {978-3-030-01388-2}, year = {2018}, date = {2018-10-16}, urldate = {2018-10-23}, booktitle = {Immersive Analytics. Lecture Notes in Computer Science}, volume = {11190}, pages = {185--220}, publisher = {Springer International Publishing}, address = {Cham}, chapter = {7}, abstract = {This chapter introduces the concept of situated analytics that employs data representations organized in relation to germane objects, places, and persons for the purpose of understanding, sensemaking, and decision-making. The components of situated analytics are characterized in greater detail, including the users, tasks, data, representations, interactions, and analytical processes involved. Several case studies of projects and products are presented that exemplify situated analytics in action. Based on these case studies, a set of derived design considerations for building situated analytics applications are presented. Finally, there is an outline of a research agenda of challenges and research questions to explore in the future.}, keywords = {A-gfw, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inbook} } @article{Oh2018, title = {A Systematic Review of Social Presence: Definition, Antecedents, and Implications}, author = {Catherine S. Oh and Jeremy N. Bailenson and Gregory F. Welch}, editor = {Doron Friedman}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/10/Oh2018.pdf}, doi = {10.3389/frobt.2018.00114}, isbn = {2296-9144}, year = {2018}, date = {2018-10-15}, journal = {Frontiers in Robotics and AI}, volume = {5}, number = {114}, abstract = {Social presence, or the feeling of being there with a “real” person, is a crucial component of interactions that take place in virtual reality. This paper reviews the concept, antecedents, and implications of social presence, with a focus on the literature regarding the predictors of social presence. The article begins by exploring the concept of social presence, distinguishing it from two other dimensions of presence—telepresence and self-presence. After establishing the definition of social presence, the article offers a systematic review of 222 separate findings identified from 150 studies that investigate the factors (i.e., immersive qualities, contextual differences, and individual psychological traits) that predict social presence. Finally, the paper discusses the implications of heightened social presence and when it does and does not enhance one's experience in a virtual environment.}, keywords = {A-gfw, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Jung2018b, title = {Over My Hand: Using a Personalized Hand in VR to Improve Object Size Estimation, Body Ownership, and Presence}, author = {Sungchul Jung and Gerd Bruder and Pamela Wisniewski and Christian Sandor and Charles E. Hughes}, year = {2018}, date = {2018-10-13}, booktitle = {Proceedings of the 6th ACM Symposium on Spatial User Interaction (SUI 2018), Berlin, Germany, October 13-14, 2018}, note = {Best Paper Award}, keywords = {A-ceh, A-gb, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Welch:2018, title = {The Rise of Allocentric Interfaces and the Collapse of the Virtuality Continuum}, author = {Gregory F.
Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Welch2018.pdf}, doi = {10.1145/3267782.3278470}, isbn = {978-1-4503-5708-1}, year = {2018}, date = {2018-10-01}, booktitle = {Proceedings of the Symposium on Spatial User Interaction}, pages = {192--192}, publisher = {ACM}, address = {Berlin, Germany}, series = {SUI '18}, keywords = {A-gfw, F-NSF, F-ONR, P-ARA, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Kim2018b, title = {Revisiting Trends in Augmented Reality Research: A Review of the 2nd Decade of ISMAR (2008–2017)}, author = {Kangsoo Kim and Mark Billinghurst and Gerd Bruder and Henry Been-Lirn Duh and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/08/Kim2018b.pdf}, doi = {10.1109/TVCG.2018.2868591}, issn = {1077-2626}, year = {2018}, date = {2018-09-06}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {24}, number = {11}, pages = {2947-2962}, abstract = {In 2008, Zhou et al. presented a survey paper summarizing the previous ten years of ISMAR publications, which provided invaluable insights into the research challenges and trends associated with that time period. Ten years later, we review the research that has been presented at ISMAR conferences since the survey of Zhou et al., at a time when both academia and the AR industry are enjoying dramatic technological changes. Here we consider the research results and trends of the last decade of ISMAR by carefully reviewing the ISMAR publications from the period of 2008–2017, in the context of the first ten years. The numbers of papers for different research topics and their impacts by citations were analyzed while reviewing them—which reveals that there is a sharp increase in AR evaluation and rendering research. Based on this review we offer some observations related to potential future research areas or trends, which could be helpful to AR researchers and industry members looking ahead.}, keywords = {A-gb, A-gfw, A-kk, F-NSF, F-ONR, P-ARA, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Norouzi2018b, title = {Augmented Rotations in Virtual Reality for Users with a Reduced Range of Head Movement}, author = {Nahal Norouzi and Luke Bölling and Gerd Bruder and Greg Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/ICDVRAT-ITAG-2018-Conference-Proceedings-122-129.pdf}, isbn = {978-0-7049-1548-0}, year = {2018}, date = {2018-09-04}, booktitle = {Proceedings of the 12th international conference on disability, virtual reality and associated technologies (ICDVRAT 2018)}, pages = {8}, abstract = {A large body of research in the field of virtual reality (VR) is focused on making user interfaces more natural and intuitive by leveraging natural body movements to explore a virtual environment. For example, head-tracked user interfaces allow users to naturally look around a virtual space by moving their head. However, such approaches may not be appropriate for users with temporary or permanent limitations of their head movement. In this paper, we present techniques that allow these users to get full-movement benefits from a reduced range of physical movements. Specifically, we describe two techniques that augment virtual rotations relative to physical movement thresholds. We describe how each of the two techniques can be implemented with either a head tracker or an eye tracker, e.g., in cases when no physical head rotations are possible. 
We discuss their differences and limitations and we provide guidelines for the practical use of such augmented user interfaces.}, keywords = {A-gb, A-gfw, A-nn, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Norouzi2018, title = {Assessing Vignetting as a Means to Reduce VR Sickness During Amplified Head Rotations}, author = {Nahal Norouzi and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/19-32-norouzi-1.pdf}, doi = {10.1145/3225153.3225162}, isbn = {978-1-4503-5894-1/18/08}, year = {2018}, date = {2018-08-10}, booktitle = {ACM Symposium on Applied Perception 2018}, pages = {8}, organization = {ACM}, abstract = {Redirected and amplified head movements have the potential to provide more natural interaction with virtual environments (VEs) than using controller-based input, which causes large discrepancies between visual and vestibular self-motion cues and leads to increased VR sickness. However, such amplified head movements may also exacerbate VR sickness symptoms over no amplification. Several general methods have been introduced to reduce VR sickness for controller-based input inside a VE, including a popular vignetting method that gradually reduces the field of view. In this paper, we investigate the use of vignetting to reduce VR sickness when using amplified head rotations instead of controller-based input. We also investigate whether the induced VR sickness is a result of the user's head acceleration or velocity by introducing two different modes of vignetting, one triggered by acceleration and the other by velocity. Our dependent measures were pre- and post-VR sickness questionnaires as well as estimated discomfort levels that were assessed each minute of the experiment. Our results show interesting effects between a baseline condition without vignetting, as well as the two vignetting methods, generally indicating that the vignetting methods did not succeed in reducing VR sickness for most of the participants and, instead, led to a significant increase. We discuss the results and potential explanations of our findings.}, keywords = {A-gb, A-gfw, A-nn, F-NSF, F-ONR, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Langbehn2018, title = {In the Blink of an Eye – Leveraging Blink-Induced Suppression for Imperceptible Position and Orientation Redirection in Virtual Reality}, author = {Eike Langbehn and Frank Steinicke and Markus Lappe and Gregory F. Welch and Gerd Bruder}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/05/Langbehn2018.pdf}, doi = {10.1145/3197517.3201335}, year = {2018}, date = {2018-08-01}, journal = {ACM Transactions on Graphics (TOG), Special Issue on ACM SIGGRAPH 2018}, volume = {37}, number = {4}, pages = {1-11}, chapter = {66}, keywords = {A-gb, A-gfw, F-NSF, F-ONR, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Abualsamid2018b, title = {Modeling Augmentative Communication with Amazon Lex and Polly}, author = {Ahmad Abualsamid and Charles E. Hughes}, editor = {Tareq Z. Ahram and Christianne Falcão}, url = {https://doi.org/10.1007/978-3-319-94947-5_85}, year = {2018}, date = {2018-06-28}, urldate = {2018-08-22}, booktitle = {Advances in Usability, User Experience and Assistive Technology. AHFE 2018.
Advances in Intelligent Systems and Computing}, volume = {794}, pages = {871-879}, publisher = {Springer, Cham}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Barmaki2018, title = {Embodiment Analytics of Practicing Teachers in a Virtual Rehearsal Environment}, author = {Roghayeh Barmaki and Charles E. Hughes}, doi = {10.1111/jcal.12268}, year = {2018}, date = {2018-05-21}, journal = {Journal of Computer Assisted Learning}, volume = {34}, number = {4}, pages = {387-396}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Abualsamid2018, title = {Using a Mobile App to Reduce Off-Task Behaviors in Classrooms: A Pilot Study}, author = {Ahmad Abualsamid and Charles E. Hughes}, doi = {10211.3/203008 }, year = {2018}, date = {2018-05-17}, journal = {Journal on Technology and Persons with Disabilities}, volume = {6}, pages = {378-384}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Taylor2018aa, title = {Augmented Reality for Tactical Combat Casualty Care Training}, author = {Glenn Taylor and Anthony Deschamps and Alyssa Tanaka and Denise Nicholson and Gerd Bruder and Gregory Welch and Francisco Guido-Sanz}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Taylor2018aa.pdf}, isbn = {978-3-319-91467-1}, year = {2018}, date = {2018-05-03}, booktitle = {Augmented Cognition: Users and Contexts}, pages = {227--239}, publisher = {Springer International Publishing}, keywords = {A-gb, A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Kim2018, title = {Improving Social Presence with a Virtual Human via Multimodal Physical–Virtual Interactivity in AR}, author = {Kangsoo Kim}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/05/Kim2018.pdf}, doi = {10.1145/3170427.3180291}, isbn = {978-1-4503-5621-3}, year = {2018}, date = {2018-04-26}, urldate = {2018-05-11}, booktitle = {Extended Abstracts of the 2018 CHI Conference on Human Factors in Computing Systems, Montreal QC, Canada — April 21 - 26, 2018 }, number = {SRC09}, pages = {SRC09:1--SRC09:6}, publisher = {ACM}, address = {New York, NY, USA}, series = {CHI EA '18}, abstract = {In a social context where a real human interacts with a virtual human (VH) in the same space, one's sense of social/co-presence with the VH is an important factor for the quality of interaction and the VH's social influence to the human user in context. Although augmented reality (AR) enables the superposition of VHs in the real world, the resulting sense of social/co-presence is usually far lower than with a real human. In this paper, we introduce a research approach employing multimodal interactivity between the virtual environment and the physical world, where a VH and a human user are co-located, to improve the social/co-presence with the VH. A preliminary study suggests a promising effect on the sense of copresence with a VH when a subtle airflow from a real fan can blow a virtual paper and curtains next to the VH as a physical–virtual interactivity. 
Our approach can be generalized to support social/co-presence with any virtual contents in AR beyond the particular VH scenarios.}, keywords = {A-kk, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Thiamwong2018aa, title = {[POSTER] Fear of falling and eye movement behavior in young adults and older adults during walking: A case study}, author = {Ladda Thiamwong and Nahal Norouzi and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/03/Thiamwong2018aa.pdf}, year = {2018}, date = {2018-04-11}, booktitle = {39th Annual Southern Gerontological Society Meeting}, address = {Buford, GA USA}, keywords = {A-gfw, A-nn, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Hochreiter2018, title = {Cognitive and touch performance effects of mismatched 3D physical and visual perceptions}, author = {Jason Hochreiter and Salam Daher and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/05/hochreiter2018.pdf}, year = {2018}, date = {2018-03-22}, booktitle = {IEEE Virtual Reality 2018 (VR 2018)}, abstract = {In a controlled human-subject study we investigated the effects of mismatched physical and visual perception on cognitive load and performance in an Augmented Reality (AR) touching task by varying the physical fidelity (matching vs. non-matching physical shape) and visual mechanism (projector-based vs. HMD-based AR) of the representation. Participants touched visual targets on four corresponding physical-visual representations of a human head. We evaluated their performance in terms of touch accuracy, response time, and a cognitive load task requiring target size estimations during a concurrent (secondary) counting task. Results indicated higher performance, lower cognitive load, and increased usability when participants touched a matching physical head-shaped surface and with visuals provided by a projector from underneath.}, keywords = {A-gb, A-gfw, A-jh, A-sd, F-NSF, F-ONR, P-BED, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Jung2018, title = {In Limbo: The Effect of Gradual Visual Transition between Real and Virtual on Virtual Body Ownership Illusion and Presence}, author = {Sungchul Jung and Pamela Wisniewski and Charles E. 
Hughes}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Jung2018aa.pdf}, year = {2018}, date = {2018-03-18}, booktitle = {IEEE Virtual Reality Conference 2018 (IEEE VR 2018), Reutlingen, Germany, March 18-22}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Hochreiter2018b, title = {Optical Touch Sensing on Non-Parametric Rear-Projection Surfaces}, author = {Jason Hochreiter}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/06/Hochreiter2018b.pdf}, doi = {10.1109/VR.2018.8446552}, year = {2018}, date = {2018-03-18}, booktitle = {2018 IEEE Conference on Virtual Reality and 3D User Interfaces (VR), Reutlingen}, pages = {805-806}, keywords = {A-jh, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Ariza2018a, title = {Analysis of Proximity-Based Multimodal Feedback for 3D Selection in Immersive Virtual Environments}, author = {Oscar Ariza and Gerd Bruder and Nicholas Katzakis and Frank Steinicke}, doi = {10.1109/VR.2018.8446317}, year = {2018}, date = {2018-03-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {327-334}, abstract = {Interaction tasks in virtual reality (VR) such as three-dimensional (3D) selection or manipulation of objects often suffer from reduced performance due to missing or different feedback provided by VR systems than during corresponding real-world interactions. Vibrotactile and auditory feedback have been suggested as additional perceptual cues complementing the visual channel to improve interaction in VR. However, it has rarely been shown that multimodal feedback improves performance or reduces errors during 3D object selection. Only little research has been conducted in the area of proximity-based multimodal feedback, in which stimulus intensities depend on spatiotemporal relations between input device and the virtual target object. In this paper, we analyzed the effects of unimodal and bimodal feedback provided through the visual, auditory and tactile modalities, while users perform 3D object selections in VEs, by comparing both binary and continuous proximity-based feedback. We conducted a Fitts' Law experiment and evaluated the different feedback approaches. The results show that the feedback types affect ballistic and correction phases of the selection movement, and significantly influence the user performance.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{Lee2018, title = {Effects of Unaugmented Periphery and Vibrotactile Feedback on Proxemics with Virtual Humans in AR}, author = {Myungho Lee and Gerd Bruder and Tobias Hollerer and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/04/Lee2018.pdf}, doi = {10.1109/TVCG.2018.2794074 }, year = {2018}, date = {2018-02-23}, journal = {IEEE Transactions on Visualization and Computer Graphics}, volume = {24}, number = {4}, pages = {1525-1534}, abstract = {In this paper, we investigate factors and issues related to human locomotion behavior and proxemics in the presence of a real or virtual human in augmented reality (AR). First, we discuss a unique issue with current-state optical see-through head-mounted displays. 
Second, we discuss the limited multimodal feedback provided by virtual humans in AR, present a potential improvement based on vibrotactile feedback induced via the floor to compensate for the limited augmented visual field, and report results showing that benefits of such vibrations are less visible in objective locomotion behavior than in subjective estimates of co-presence. Third, we investigate and document significant differences in the effects that real and virtual humans have on locomotion behavior in AR. We discuss potential explanations for these effects and analyze effects of different types of behaviors that such real or virtual humans may exhibit in the presence of an observer.}, keywords = {A-gb, A-gfw, F-ONR, P-EPICAR, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Barmaki2018b, title = {Gesturing and Embodiment in Teaching: Investigating the Nonverbal Behavior of Teachers in a Virtual Rehearsal Environment}, author = {Roghayeh Barmaki and Charles Hughes}, url = {https://www.aaai.org/ocs/index.php/AAAI/AAAI18/paper/viewFile/17145/16409}, year = {2018}, date = {2018-02-03}, booktitle = {The Eighth Symposium on Educational Advances in Artificial Intelligence 2018 (EAAI-18), New Orleans, February 3-4}, pages = {7893-7899}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Abualsamid2018c, title = {Why Is Video Modeling Not Used in Special Needs Classrooms? }, author = {Ahmad Abualsamid and Charles E. Hughes}, editor = {Terence Andre}, doi = {https://doi.org/10.1007/978-3-319-60018-5}, year = {2018}, date = {2018-01-01}, booktitle = {Advances in Human Factors in Training, Education, and Learning Sciences. AHFE 2017. Advances in Intelligent Systems and Computing}, volume = {596}, pages = {123-130}, publisher = {Springer, Cham.}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Jo2017, title = {[POSTER] The Impact of Avatar-owner Visual Similarity on Body Ownership in Immersive Virtual Reality}, author = {Dongsik Jo and Kangsoo Kim and Gregory F. Welch and Woojin Jeon and Yongwan Kim and Ki-Hong Kim and Gerard Jounghyun Kim}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/05/Jo2017.pdf}, doi = {10.1145/3139131.3141214}, isbn = {978-1-4503-5548-3}, year = {2017}, date = {2017-11-08}, booktitle = {Proceedings of the 23rd ACM Symposium on Virtual Reality Software and Technology}, pages = {77:1--77:2}, publisher = {ACM}, address = {Gothenburg, Sweden}, series = {VRST '17}, abstract = {In this paper we report on an investigation of the effects of a self-avatar's visual similarity to a user's actual appearance, on their perceptions of the avatar in an immersive virtual reality (IVR) experience. We conducted a user study to examine the participant's sense of body ownership, presence and visual realism under three levels of avatar-owner visual similarity: (L1) an avatar reconstructed from real imagery of the participant's appearance, (L2) a cartoon-like virtual avatar created by a 3D artist for each participant, where the avatar shoes and clothing mimic that of the participant, but using a low-fidelity model, and (L3) a cartoon-like virtual avatar with a pre-defined appearance for the shoes and clothing. Surprisingly, the results indicate that the participants generally exhibited the highest sense of body ownership and presence when inhabiting the cartoon-like virtual avatar mimicking the outfit of the participant (L2), despite the relatively low participant similarity. 
We present our experiment and main findings, and discuss the potential impact of a self-avatar's visual differences on human perceptions in IVR.}, keywords = {A-gfw, A-kk, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2017ad, title = {System for Detecting Sterile Field Events and Related Methods}, author = {Gregory Welch and Arjun Nagendran and Jason Hochreiter and Laura Gonzalez and Hassan Foroosh}, url = {https://patents.google.com/patent/US9808549B2/en?oq=US+9808549 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=/netahtml/PTO/srchnum.htm&r=1&f=G&l=50&s1=9,808,549.PN.&OS=PN/9,808,549&RS=PN/9,808,549}, year = {2017}, date = {2017-11-07}, number = {US 9808549B2}, location = {US}, abstract = {A system is for monitoring a sterile field associated with a medical procedure. The system may include a sensor being adjacent an area where the medical procedure is to be performed. The sensor is configured to monitor at least the area, a patient for the medical procedure, and a medical technician for the medical procedure. The system may include a processor coupled to the sensor and configured to detect a sterile field event, and an associated location for the sterile field event, and an output device coupled to the processor and configured to generate an alert indicator when the sterile field event is detected, the alert indicator also including the associated location.}, note = {Filed: 2015-12-23}, keywords = {A-gfw, A-jh, SREAL}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Schmidt2017a, title = {[POSTER] A Pilot Study of Altering Depth Perception with Projection-Based Illusions}, author = {Susanne Schmidt and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/01/Schmidt2017a.pdf}, year = {2017}, date = {2017-11-01}, booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE)}, pages = {33-34}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Schmidt2017b, title = {Moving Towards Consistent Depth Perception in Stereoscopic Projection-based Augmented Reality}, author = {Susanne Schmidt and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2019/01/Schmidt2017b.pdf}, year = {2017}, date = {2017-11-01}, booktitle = {Proceedings of the International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments (ICAT-EGVE)}, pages = {161-168}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Nojavanasghari2017, title = {Hand2Face: Automatic Synthesis and Recognition of Hand Over Face Occlusions}, author = {Behnaz Nojavanasghari and Charles E. Hughes and Tadas Baltrušaitis and Louis-Philippe Morency}, url = {http://acii2017.org/accepted_papers}, year = {2017}, date = {2017-10-23}, booktitle = {Proceedings of Affective Computing and Intelligent Interaction (ACII 2017), San Antonio, TX, Oct.
23-26}, pages = {209-215}, address = {San Antonio, TX}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2017ac, title = {Methods, systems, and computer readable media for utilizing synthetic animatronics}, author = {Gregory Welch and Kurtis Keller and Andrei State and Henry Fuchs and Ryan Schubert}, url = {https://patents.google.com/patent/US9792715B2/en?oq=US+9792715 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=9,792,715.PN.&OS=PN/9,792,715&RS=PN/9,792,715}, year = {2017}, date = {2017-10-17}, number = {US 9792715B2}, location = {US}, abstract = {Methods, systems, and computer readable media for utilizing synthetic animatronics are disclosed. According to one aspect, a method for synthetic animatronics includes providing a display surface having different regions that accommodate different positions or deformations of a subject, mapping images of the subject to the different regions on the display surface, and displaying the mapped images on the different regions of the display surface at different times in accordance with a desired animation of the subject.}, note = {Filed: 2013-05-17}, keywords = {A-gfw, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Jung2017, title = {RealME: The Influence of Body and Hand Representations on Body Ownership and Presence}, author = {Sungchul Jung and Christian Sandor and Pamela Wisniewski and Charles E. Hughes}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Jung2017.pdf}, doi = {10.1145/3131277.3132186}, year = {2017}, date = {2017-10-16}, booktitle = {Proceedings of the 5th Symposium on Spatial User Interaction (SUI 2017), Brighton, UK, October 16-17, 2017}, pages = {3-11}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @misc{daher2017physical, title = {Physical-Virtual Patient Head}, author = {Salam Daher and Laura Gonzalez and Greg Welch}, url = {https://sreal.ucf.edu/florida_nurse_9_17/}, year = {2017}, date = {2017-09-01}, journal = {Florida Nurses Association}, volume = {65}, number = {3}, pages = {9}, keywords = {A-gfw, A-sd, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {misc} } @techreport{daher2017linkfoundationreport, title = {Physical-Virtual Patient Bed}, author = {Salam Daher}, url = {https://sreal.ucf.edu/linkfoundation_pvpbreport/}, year = {2017}, date = {2017-08-28}, note = {Daher, Salam.(2017).Physical-Virtual Patient Bed. (Link Foundation Fellowship Final Reports: Modeling, Simulation, and Training Program.) 
Retrieved from The Scholarship Repository of Florida Institute of Technology website: https://repository.lib.fit.edu/}, keywords = {A-sd, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {techreport} } @inbook{Daher2017ab, title = {Effects of Social Priming on Social Presence with Intelligent Virtual Agents}, author = {Salam Daher and Kangsoo Kim and Myungho Lee and Ryan Schubert and Gerd Bruder and Jeremy Bailenson and Greg Welch}, editor = {Jonas Beskow and Christopher Peters and Ginevra Castellano and Carol O'Sullivan and Iolanda Leite and Stefan Kopp}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/12/Daher2017ab.pdf}, doi = {10.1007/978-3-319-67401-8_10}, year = {2017}, date = {2017-08-26}, booktitle = {Intelligent Virtual Agents: 17th International Conference, IVA 2017, Stockholm, Sweden, August 27-30, 2017, Proceedings}, volume = {10498}, pages = {87-100}, publisher = {Springer International Publishing}, keywords = {A-gb, A-gfw, A-kk, A-sd, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {inbook} } @patent{Welch2017ab, title = {Physical-virtual patient bed system}, author = {Gregory Welch and Karen Aroian and Steven Talbert and Kelly Allred and Patricia Weinstein and Arjun Nagendran and Remo Pillat}, url = {https://patents.google.com/patent/US9679500B2/en?oq=US+9679500 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=9,679,500.PN.&OS=PN/9,679,500&RS=PN/9,679,500}, year = {2017}, date = {2017-06-13}, number = {US 9679500B2}, location = {US}, abstract = {A patient simulation system for healthcare training is provided. The system includes one or more interchangeable shells comprising a physical anatomical model of at least a portion of a patient's body, the shell adapted to be illuminated from behind to provide one or more dynamic images viewable on the outer surface of the shells; a support system adapted to receive the shells via a mounting system, wherein the system comprises one or more image units adapted to render the one or more dynamic images viewable on the outer surface of the shells; one or more interface devices located about the patient shells to receive input and provide output; and one or more computing units in communication with the image units and interface devices, the computing units adapted to provide an interactive simulation for healthcare training.}, note = {Filed: 2014-03-12}, keywords = {A-gfw, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Jung2017b, title = {Pilot Study: The Effect of Real User Body Cues to The Perception on Virtual Body}, author = {Sungchul Jung and Christian Sandor and Charles E.
Hughes}, url = {http://casa2017.kaist.ac.kr/wordpress/wp-content/uploads/2017/06/47-Jung.pdf}, year = {2017}, date = {2017-05-22}, booktitle = {Proceedings of the 30th Conference on Computer Animation and Social Agents (CASA 2017), Seoul, Korea, May 22-24, 2017}, pages = {47-50}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Kim2017b, title = {The effects of virtual human's spatial and behavioral coherence with physical objects on social presence in AR}, author = {Kangsoo Kim and Divine Maloney and Gerd Bruder and Jeremy Bailenson and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/12/Kim2017b.pdf}, doi = {10.1002/cav.1771}, year = {2017}, date = {2017-05-21}, journal = {Computer Animation and Virtual Worlds}, volume = {28}, number = {3-4}, pages = {e1771-n/a}, keywords = {A-gb, A-gfw, A-kk, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{daher2017optical, title = {[DC] Optical See-Through vs. Spatial Augmented Reality Simulators for Medical Applications}, author = {Salam Daher}, url = {https://sreal.ucf.edu/dc_ieeevr2017_20170226_0828/}, doi = {10.1109/VR.2017.7892354}, year = {2017}, date = {2017-05-20}, booktitle = {Virtual Reality (VR), 2017 IEEE}, pages = {417-418}, organization = {IEEE}, abstract = {Currently healthcare practitioners use standardized patients, physical mannequins, and virtual patients as surrogates for real patients to provide a safe learning environment for students. Each of these simulators has different limitations that could be mitigated with various degrees of fidelity to represent medical cues. As we are exploring different ways to simulate a human patient and their effects on learning, we would like to compare the dynamic visuals between spatial augmented reality and an optical see-through augmented reality where a patient is rendered using the HoloLens and how that affects depth perception, task completion, and social presence.}, keywords = {A-sd, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Nojavanasghari2017b, title = {Hands-on: Context-Driven Hand Gesture Recognition for Automatic Recognition of Curiosity}, author = {Behnaz Nojavanasghari and Louis-Philippe Morency and Charles E. Hughes}, year = {2017}, date = {2017-05-07}, booktitle = {Proceedings of CHI 2017 Workshop: Designing for Curiosity. Denver, CO, May 7}, note = {Poster and Short Paper}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Nojavanasghari2017c, title = {Exceptionally Social: Design of an Avatar-Mediated Interactive System for Promoting Social Skills in Children with Autism}, author = {Behnaz Nojavanasghari and Louis-Philippe Morency and Charles E. Hughes}, url = {https://dl.acm.org/citation.cfm?id=3053112&dl=ACM&coll=DL&CFID=835574635&CFTOKEN=94685731}, year = {2017}, date = {2017-05-06}, booktitle = {Proceedings of CHI 2017.
Denver, CO, May 6-11}, pages = {1932-1939}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Lee2017, title = {Effect of Vibrotactile Feedback through the Floor on Social Presence in an Immersive Virtual Environment}, author = {Myungho Lee and Gerd Bruder and Greg Welch}, doi = {10.1167/17.10.357}, year = {2017}, date = {2017-05-01}, journal = {Journal of Vision: Abstract Issue 2017}, volume = {17}, number = {10}, pages = {357}, keywords = {A-gb, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Schubert2017, title = {Mitigating Perceptual Error in Synthetic Animatronics using Visual Feature Flow}, author = {Ryan Schubert and Gerd Bruder and Greg Welch}, doi = {10.1167/17.10.331}, year = {2017}, date = {2017-05-01}, journal = {Journal of Vision: Abstract Issue 2017}, volume = {17}, number = {10}, pages = {331}, keywords = {A-gb, A-gfw, F-NSF, F-ONR, P-BED, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Lee2017aa, title = {Exploring the Effect of Vibrotactile Feedback through the Floor on Social Presence in an Immersive Virtual Environment}, author = {Myungho Lee and Gerd Bruder and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Lee2017aa.pdf}, doi = {10.1109/VR.2017.7892237 }, year = {2017}, date = {2017-03-19}, booktitle = {Proceedings of IEEE Virtual Reality 2017}, keywords = {A-gb, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Skarbez2017aa, title = {[POSTER] Coherence Changes Gaze Behavior in Virtual Human Interactions}, author = {Richard Skarbez and Gregory F. Welch and Frederick P. Brooks and Mary Whitton}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Skarbez2017aa.pdf}, doi = {10.1109/VR.2017.7892289}, year = {2017}, date = {2017-03-19}, booktitle = {2017 IEEE Virtual Reality (VR)}, pages = {287-288}, keywords = {A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Daher2017aa, title = {[POSTER] Can Social Presence be Contagious? Effects of Social Presence Priming on Interaction with Virtual Humans}, author = {Salam Daher and Kangsoo Kim and Myungho Lee and Gerd Bruder and Ryan Schubert and Jeremy Bailenson and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Daher2017aa_red.pdf}, doi = {10.1109/3DUI.2017.7893341 }, year = {2017}, date = {2017-03-18}, booktitle = {3D User Interfaces (3DUI), 2017 IEEE Symposium on }, keywords = {A-gb, A-gfw, A-kk, A-sd, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Langbehn2017a, title = {[DEMO] Application of Redirected Walking in Room-Scale VR}, author = {Eike Langbehn and Paul Lubos and Gerd Bruder and Frank Steinicke}, year = {2017}, date = {2017-03-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, journal = {Proceedings of IEEE Virtual Reality (VR)}, pages = {449-450}, abstract = {Redirected walking (RDW) promises to allow near-natural walking in an infinitely large virtual environment (VE) by subtle manipulations of the virtual camera. Previous experiments showed that a physical radius of at least 22 meters is required for undetectable RDW. However, we found that it is possible to decrease this radius and to apply RDW to room-scale VR, i. e., up to approximately 5m x 5m. This is done by using curved paths in the VE instead of straight paths, and by coupling them together in a way that enables continuous walking. 
Furthermore, the corresponding paths in the real world are laid out in a way that fits perfectly into room-scale VR. In this research demo, users can experience RDW in a room-scale head-mounted display VR setup and explore a VE of approximately 25m x 25m.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{Langbehn2017b, title = {Bending the Curve: Sensitivity to Bending of Curved Paths and Application in Room-Scale VR}, author = {Eike Langbehn and Paul Lubos and Gerd Bruder and Frank Steinicke}, doi = {10.1109/TVCG.2017.2657220}, year = {2017}, date = {2017-03-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG), Special Issue on IEEE Virtual Reality (VR)}, volume = {23}, number = {4}, pages = {1389-1398}, abstract = {Redirected walking (RDW) promises to allow near-natural walking in an infinitely large virtual environment (VE) by subtle manipulations of the virtual camera. Previous experiments analyzed the human sensitivity to RDW manipulations by focusing on the worst-case scenario, in which users walk perfectly straight ahead in the VE, whereas they are redirected on a circular path in the real world. The results showed that a physical radius of at least 22 meters is required for undetectable RDW. However, users do not always walk exactly straight in a VE. So far, it has not been investigated how much a physical path can be bent in situations in which users walk a virtual curved path instead of a straight one. Such curved walking paths can be often observed, for example, when users walk on virtual trails, through bent corridors, or when circling around obstacles. In such situations the question is not, whether or not the physical path can be bent, but how much the bending of the physical path may vary from the bending of the virtual path. In this article, we analyze this question and present redirection by means of bending gains that describe the discrepancy between the bending of curved paths in the real and virtual environment. Furthermore, we report the psychophysical experiments in which we analyzed the human sensitivity to these gains. The results reveal encouragingly wider detection thresholds than for straightforward walking. Based on our findings, we discuss the potential of curved walking and present a first approach to leverage bent paths in a way that can provide undetectable RDW manipulations even in room-scale VR.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{Ariza2017a, title = {Vibrotactile Assistance for User Guidance Towards Selection Targets in VR and the Cognitive Resources Involved}, author = {Oscar Ariza and Markus Lange and Frank Steinicke and Gerd Bruder}, year = {2017}, date = {2017-03-01}, booktitle = {Proceedings of IEEE Symposium on 3D User Interfaces (3DUI)}, pages = {95-98}, abstract = {Current head-mounted displays (HMDs) provide a large binocular field of view (FOV) for natural interaction in virtual environments (VEs). However, the selection of objects located in the periphery and outside the FOV requires visual search by head rotations, which can reduce the performance of interaction in virtual reality (VR). This technote explores the use of a pair of self-made wireless and wearable devices, which once attached to both hemispheres of the user's head provide assistive vibrotactile cues for guidance in order to reduce the time used to turn and locate a target object. 
We present an experiment based on a dual-tasking method to analyze cognitive demands and performance metrics during a set of selection tasks followed by a working memory task.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2017aa, title = {Methods, systems, and computer readable media for shader-lamps based physical avatars of real and virtual people}, author = {Gregory Welch and Henry Fuchs and Peter Lincoln and Andrew Nashel and Andrei State}, url = {https://patents.google.com/patent/US9538167B2/en?oq=US+9538167 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=9538167.PN.&OS=PN/9538167&RS=PN/9538167}, year = {2017}, date = {2017-01-03}, number = {US 9538167B2}, location = {US}, abstract = {Methods, systems, and computer readable media for shader lamps-based avatars of real and virtual people are disclosed. According to one method, shader lamps-based avatars of real and virtual objects are displayed on physical target objects. The method includes obtaining visual information of a source object and generating at least a first data set of pixels representing a texture image of the source object. At least one of a size, shape, position, and orientation of a 3D physical target object are determined. A set of coordinate data associated with various locations on the surface of the target object are also determined. The visual information is mapped to the physical target object. Mapping includes defining a relationship between the first and second sets of data, wherein each element of the first set is related to each element of the second set. The mapped visual information is displayed on the physical target object using a display module, such as one or more projectors located at various positions around the target object.}, note = {Filed: 2010-03-08}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {patent} } @article{Kim2017, title = {A Large-Scale Study of Surrogate Physicality and Gesturing on Human–Surrogate Interactions in a Public Space}, author = {Kangsoo Kim and Arjun Nagendran and Jeremy N. Bailenson and Andrew Raij and Gerd Bruder and Myungho Lee and Ryan Schubert and Xin Yan and Gregory F. Welch}, url = {http://journal.frontiersin.org/article/10.3389/frobt.2017.00032 https://sreal.ucf.edu/wp-content/uploads/2017/07/Kim2017.pdf}, doi = {10.3389/frobt.2017.00032}, issn = {2296-9144}, year = {2017}, date = {2017-01-01}, journal = {Frontiers in Robotics and AI}, volume = {4}, pages = {1-20}, keywords = {A-gb, A-gfw, A-kk, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Dieker2017, title = {Using simulated virtual environments to improve teacher performance}, author = {Lisa Dieker and Charles E. Hughes and Michael Hynes and Carrie Straub}, url = {http://napds.org/wp-content/uploads/2017/07/Using-Simulated-Virtual-Environments-to-Improve-Teacher-Performance-.pdf}, year = {2017}, date = {2017-01-01}, journal = {School University Partnerships (Journal of the National Association for Professional Development Schools): Special Issue: Technology to Enhance PDS}, volume = {10}, number = {3}, pages = {62-81}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Taylor2017b, title = {Impact of virtual simulation on the interprofessional communication skills of physical therapy students}, author = {Matthew S. 
Taylor and Jennifer Tucker and Clair Donehower and Patrick Pabian and Lisa Dieker and Michael Hynes and Charles E. Hughes}, url = {https://aptaeducation.org/members/jopte/ }, year = {2017}, date = {2017-01-01}, journal = {Journal of Physical Therapy Education}, volume = {31}, number = {3}, pages = {83-90}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Kim2017a, title = {Exploring the Effects of Observed Physicality Conflicts on Real-virtual Human Interaction in Augmented Reality}, author = { Kangsoo Kim and Gerd Bruder and Greg Welch}, url = {http://doi.acm.org/10.1145/3139131.3139151 https://sreal.ucf.edu/wp-content/uploads/2017/12/Kim2017a.pdf}, doi = {10.1145/3139131.3139151}, isbn = {978-1-4503-5548-3}, year = {2017}, date = {2017-01-01}, booktitle = {Proceedings of the 23rd ACM Symposium on Virtual Reality Software and Technology}, pages = {31:1--31:7}, publisher = {ACM}, address = {Gothenburg, Sweden}, series = {VRST '17}, keywords = {A-gb, A-gfw, A-kk, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Abualsamid2017, title = {Language Sample Analysis Framework Utilizing the Natural Language Toolkit and Social Media}, author = {Ahmad Abualsamid and Charles E. Hughes}, editor = {Vincent G.
Duffy and Nancy Lightner}, url = {http://dx.doi.org/10.1007/978-3-319-41652-6_41}, doi = {10.1007/978-3-319-41652-6_41}, isbn = {978-3-319-41652-6}, year = {2017}, date = {2017-01-01}, booktitle = {Advances in Human Factors and Ergonomics in Healthcare}, issuetitle = {Proceedings of the AHFE 2016 International Conference on Human Factors and Ergonomics in Healthcare, July 27-31, 2016, Walt Disney World®, Florida, USA}, volume = {482}, pages = {445--456}, publisher = {Springer International Publishing}, address = {Cham}, series = {Advances in Intelligent Systems and Computing}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @article{JBLGPS17, title = {Walking in Virtual Reality: Effects of Manipulated Visual Self-Motion on Walking Biomechanics}, author = {Omar Janeh and Eike Langbehn and Frank Steinicke and Gerd Bruder and Alessandro Gulberti and Monika Poetter-Nerger }, url = {https://sreal.ucf.edu/wp-content/uploads/2017/05/JBLGPS17-red.pdf}, doi = {10.1145/3022731 }, year = {2017}, date = {2017-01-01}, journal = {ACM Transactions on Applied Perception (TAP)}, volume = {14}, number = {2}, pages = {12:1--12:15}, keywords = {A-gb, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{kim2016ab, title = {The influence of real human personality on social presence with a virtual human in augmented reality.}, author = {Kangsoo Kim and Gerd Bruder and Divine Maloney and Greg Welch}, editor = {Dirk Reiners and Daisuke Iwai and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Kim2016ab.pdf}, year = {2016}, date = {2016-12-07}, booktitle = {ICAT‐EGVE 2016 ‐ International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments}, address = {Little Rock, AR, USA}, organization = {The Eurographics Association}, keywords = {A-gb, A-gfw, A-kk, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Schubert2016aa, title = {HuSIS: A Dedicated Space for Studying Human Interactions}, author = {Ryan Schubert and Greg Welch and Salam Daher and Andrew Raij}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Schubert2016aa.pdf}, year = {2016}, date = {2016-11-01}, journal = {IEEE Computer Graphics and Applications}, volume = {36}, number = {6}, publisher = {IEEE Computer Society Press}, keywords = {A-gfw, A-sd, F-ONR, P-THuSIS, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Petkova2016, title = {Accelerating the distributed simulations of agent-based models using community detection}, author = {Antoniya Petkova and Charles Hughes and Narsingh Deo and Martin Dimitrov}, doi = {10.1109/RIVF.2016.7800264}, year = {2016}, date = {2016-11-01}, booktitle = {2016 IEEE RIVF International Conference on Computing Communication Technologies, Research, Innovation, and Vision for the Future (RIVF)}, pages = {25-30}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Patel2016, title = {MeEmo - Using an avatar to improve social skills in children with ASD}, author = {Sapna Patel and Darin E. Hughes and Charles E. 
Hughes}, url = {https://www.wocci.org/2016/files/submissions/2016/wocci2016_paper_9.pdf}, year = {2016}, date = {2016-09-06}, booktitle = {Workshop on Child Computer Interaction (WOCCI 2016)}, address = {San Francisco, CA}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Nojavanasghari2016, title = {The Future Belongs to the Curious: Towards Automatic Understanding and Recognition of Curiosity in Children.}, author = {Behnaz Nojavanasghari and Tadas Baltrušaitis and Charles E. Hughes and Louis-Philippe Morency}, url = {https://www.wocci.org/2016/files/submissions/2016/wocci2016_paper_3.pdf}, year = {2016}, date = {2016-09-06}, booktitle = {Workshop on Child Computer Interaction (WOCCI 2016)}, pages = {16-22}, address = {San Francisco, CA}, series = {(WOCCI 2016),}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Kim2016aa, title = {[POSTER] Exploring the Impact of Environmental Effects on Social Presence with a Virtual Human}, author = {Kangsoo Kim and Ryan Schubert and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Kim2016aa.pdf}, year = {2016}, date = {2016-09-01}, booktitle = {Proceedings of the 16th International Conference on Intelligent Virtual Agents (IVA 2016)}, volume = {10011}, pages = {470--474}, address = {Los Angeles, CA}, keywords = {A-gfw, A-kk, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Barmaki2016, title = {Towards the Understanding of Gestures and Vocalization Coordination in Teaching Context}, author = {Roghayeh Barmaki and Charles E. Hughes}, year = {2016}, date = {2016-06-29}, booktitle = {Proceedings of the 9th International Conference on Educational Data Mining}, pages = {663-665}, series = {EDM2016}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Welch2016aa, title = {Highlights of ``Immersive Sciences'' Research in the U.S.A.: Augmented/Virtual Reality and Human Surrogates}, author = {Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2016aa_red.pdf}, year = {2016}, date = {2016-06-01}, journal = {Journal of the Virtual Reality Society of Japan}, volume = {21}, number = {2}, pages = {129--137}, keywords = {A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Carbone2016, title = {Psychomotor skills measurement for surgery training using game-based methods}, author = {Tom Carbone and Ruby McDaniel and Charles Hughes}, doi = {10.1109/SeGAH.2016.7586278}, year = {2016}, date = {2016-05-01}, booktitle = {2016 IEEE International Conference on Serious Games and Applications for Health (SeGAH)}, pages = {1-6}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Tanaka2016, title = {Video Game Experience and Basic Robotic Skills}, author = {Alyssa Tanaka and Roger Smith and Charles Hughes}, doi = {10.1109/SeGAH.2016.7586262}, year = {2016}, date = {2016-05-01}, booktitle = {2016 IEEE International Conference on Serious Games and Applications for Health (SeGAH)}, pages = {1-6}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Hughes2016b, title = {Enhancing Protective Role-playing Behaviors through avatar-based Scenarios}, author = {Charles E. Hughes and Thomas Hall and Kathleen Ingraham and Jennifer A. Epstein and Darin E. 
Hughes}, doi = {10.1109/SeGAH.2016.7586229}, year = {2016}, date = {2016-05-01}, booktitle = {2016 IEEE International Conference on Serious Games and Applications for Health (SeGAH)}, pages = {1-6}, note = {Best paper Award}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Welch2016ab, title = {[POSTER] Interactive Rear‐Projection Physical‐Virtual Patient Simulators}, author = {Greg Welch and Salam Daher and Jason Hochreiter and Laura Gonzalez}, url = {https://sreal.ucf.edu/welch_gf-20160121/}, year = {2016}, date = {2016-04-07}, booktitle = {22nd Medicine Meets Virtual Reality (NextMed / MMVR)}, address = {Los Angeles, CA, USA}, keywords = {A-gfw, A-jh, A-sd, SREAL}, pubstate = {published}, tppubtype = {conference} } @conference{Daher2016, title = {[POSTER] Humanikins: Humanity Transfer to Physical Manikins}, author = {Salam Daher and Greg Welch}, url = {https://sreal.ucf.edu/daher_s/}, year = {2016}, date = {2016-04-07}, booktitle = {22nd Medicine Meets Virtual Reality (NextMed / MMVR)}, address = {Los Angeles, CA, USA}, keywords = {A-gfw, A-sd, F-NSF, P-BED, SREAL}, pubstate = {published}, tppubtype = {conference} } @conference{Daher2016aa, title = {[POSTER] Exploring Social Presence Transfer in Real-Virtual Human Interaction}, author = {Salam Daher and Kangsoo Kim and Myungho Lee and Andrew Raij and Ryan Schubert and Jeremy Bailenson and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Daher2016aa.pdf}, year = {2016}, date = {2016-03-01}, booktitle = {Proceedings of IEEE Virtual Reality 2016}, address = {Greenville, SC, USA}, keywords = {A-gfw, A-kk, A-sd, SREAL}, pubstate = {published}, tppubtype = {conference} } @inproceedings{Lee2016, title = {The wobbly table: Increased social presence via subtle incidental movement of a real-virtual table}, author = {Myungho Lee and Kangsoo Kim and Salam Daher and Andrew Raij and Ryan Schubert and Jeremy Bailenson and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Lee2016.pdf}, doi = {10.1109/VR.2016.7504683}, year = {2016}, date = {2016-03-01}, booktitle = {2016 IEEE Virtual Reality (VR)}, pages = {11-17}, keywords = {A-gfw, A-kk, A-sd, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Hughes2016, title = {De-escalation Training in an Augmented Virtuality Space}, author = {Charles E. Hughes and Kathleen M. 
Ingraham}, doi = {10.1109/VR.2016.7504713}, year = {2016}, date = {2016-03-01}, booktitle = {2016 IEEE Virtual Reality (VR)}, pages = {181-182}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Daher2016ab, title = {[POSTER] Preliminary Assessment of Neurologic Symptomatology Using an Interactive Physical‐Virtual Head with Touch}, author = {Salam Daher and Laura Gonzalez and Greg Welch}, url = {https://sreal.ucf.edu/daher_gonzalez_welch_imsh2016_eposter_20160113_2222/}, year = {2016}, date = {2016-01-16}, booktitle = {17th International Meeting on Simulation in Healthcare (IMSH 2016)}, keywords = {A-gfw, A-sd, SREAL}, pubstate = {published}, tppubtype = {conference} } @conference{Gonzalez2016aa, title = {Student Nursing Assessment of Discrete Neurology Symptoms using an Interactive Physical Virtual Head}, author = {Laura Gonzalez and Salam Daher and Jason Hochreiter and Greg Welch}, url = {https://sreal.ucf.edu/inacsl_vph_final/}, year = {2016}, date = {2016-01-01}, booktitle = {Presentation at International Nursing Association for Clinical Simulation and Learning}, keywords = {A-gfw, A-jh, A-sd, SREAL}, pubstate = {published}, tppubtype = {conference} } @article{Hochreiter2016aa, title = {Optical Touch Sensing on Non-Parametric Rear-Projection Surfaces for Interactive Physical-Virtual Experiences}, author = {Jason Hochreiter and Salam Daher and Arjun Nagendran and Laura Gonzalez and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Hochreiter2016aa_red.pdf}, year = {2016}, date = {2016-01-01}, journal = {Presence: Teleoperators and Virtual Environments}, volume = {25}, number = {1}, keywords = {A-gfw, A-jh, A-sd, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Nojavanasghari2016b, title = {EmoReact: A Multimodal Approach and Dataset for Recognizing Emotional Responses in Children}, author = {Behnaz Nojavanasghari and Tadas Baltrušaitis and Charles E. Hughes and Louis-Philippe Morency}, url = {http://doi.acm.org/10.1145/2993148.2993168}, doi = {10.1145/2993148.2993168}, isbn = {978-1-4503-4556-9}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the 18th ACM International Conference on Multimodal Interaction}, pages = {137--144}, publisher = {ACM}, address = {Tokyo, Japan}, series = {ICMI 2016}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Jung2016, title = {[POSTER] The Effects of Indirectly Implied Real Body Cues to Virtual Body Ownership and Presence in a Virtual Reality Environment}, author = {Sungchul Jung and Charles E. Hughes}, url = {http://doi.acm.org/10.1145/2993369.2996346}, doi = {10.1145/2993369.2996346}, isbn = {978-1-4503-4491-3}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the 22nd ACM Conference on Virtual Reality Software and Technology}, pages = {363--364}, publisher = {ACM}, address = {Munich, Germany}, series = {VRST '16}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Jung2016b, title = {The Effects of Indirect Real Body Cues of Irrelevant Parts on Virtual Body Ownership and Presence}, author = {Sungchul Jung and Charles E. 
Hughes}, editor = {Dirk Reiners and Daisuke Iwai and Frank Steinicke}, doi = {10.2312/egve.20161442}, issn = {1727-530X}, year = {2016}, date = {2016-01-01}, booktitle = {ICAT-EGVE 2016 - International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments}, publisher = {The Eurographics Association}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Dieker2016, title = {Mixed reality environments in teacher education: Development and future applications}, author = {Lisa Dieker and Benjamin Lignugaris-Kraft and Michael Hynes and Charles E. Hughes}, editor = {Barbara Ludlow and Belva Collins}, year = {2016}, date = {2016-01-01}, booktitle = {Online in Real Time: Using WEB 2.0 for Distance Education in Rural Special Education}, pages = {122-131}, publisher = {American Council for Rural Special Educators}, chapter = {12}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{LBAS16, title = {Ambiculus: LED-based Low-Resolution Peripheral Display Extension for Immersive Head-Mounted Displays}, author = {Paul Lubos and Gerd Bruder and Oscar Ariza and Frank Steinicke}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)}, pages = {1--4}, organization = {ACM}, abstract = {Peripheral vision in immersive virtual environments is important for application fields that require high spatial awareness and veridical impressions of three-dimensional spaces. Head-mounted displays (HMDs), however, use displays and optical elements in front of a user's eyes, which often do not natively support a wide field of view to stimulate the entire human visual field. Such limited visual angles are often identified as causes of reduced navigation performance and sense of presence. In this paper we present an approach to extend the visual field of HMDs towards the periphery by incorporating additional optical LED elements structured in an array, which provide additional low-resolution information in the periphery of a user's eyes. We detail our approach, technical realization, and present an experiment, in which we show that such far peripheral stimulation can increase subjective estimates of presence, and has the potential to change user behaviour during navigation in a virtual environment.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{BAOL16, title = {CAVE Size Matters: Effects of Screen Distance and Parallax on Distance Estimation in Large Immersive Display Setups}, author = {Gerd Bruder and Fernando Argelaguet Sanz and Anne-Héléne Olivier and Anatole Lécuyer}, year = {2016}, date = {2016-01-01}, journal = {Presence: Teleoperators and Virtual Environments}, volume = {25}, number = {1}, pages = {1--16}, abstract = {When walking within a CAVE-like system, accommodation distance, parallax and angular resolution vary according to the distance between the user and the projection walls which can alter spatial perception. As these systems get bigger, there is a need to assess the main factors influencing spatial perception in order to better design immersive projection systems and virtual reality applications. In this article we present two experiments which analyze distance perception when considering the distance towards the projection screens and parallax as main factors. Both experiments were conducted in a large immersive projection system with up to ten meter interaction space. 
The first experiment showed that both the screen distance and parallax have a strong asymmetric effect on distance judgments. We observed increased underestimation for positive parallax conditions and slight distance overestimation for negative and zero parallax conditions. The second experiment further analyzed the factors contributing to these effects and confirmed the observed effects of the first experiment with a high-resolution projection setup providing twice the angular resolution and improved accommodative stimuli. In conclusion, our results suggest that space is the most important characteristic for distance perception, optimally requiring about 6 to 7-meter distance around the user, and virtual objects with high demands on accurate spatial perception should be displayed at zero or negative parallax.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BHJORSLBS16, title = {Edutainment & Engagement at Exhibitions: A Case Study of Gamification in the Historic Hammaburg Model}, author = {Julia Braeker and Steffen Haesler and Christoph Jahnke and Karen Obernesser and Tino Raupp and Jonathan Stapf and Paul Lubos and Gerd Bruder and Frank Steinicke}, editor = {Wolfgang Prinz and Jan Borchers and Matthias Jarke}, year = {2016}, date = {2016-01-01}, booktitle = {Mensch und Computer 2016 - Tagungsband}, pages = {1--9}, abstract = {Gamification in the context of interactive exhibitions has enormous potential to attract visitors and improve their engagement, flow, and learning. This paper describes a case study in which we use game-design elements for an interactive and collaborative exploration of a virtual exhibition. The goal is to collaboratively explore the possibilities of a multiplayer game using different user interfaces and input devices in the same environment. The case study was conducted using a virtual 3D model of the “Hammaburg”, which is a medieval castle of the 9th century. The idea of the multiplayer exhibition consists of a two-player game. One player is using a touch-table or other touch input devices, whereas the other player is using an immersive head-mounted display (HMD), combined with a game controller to navigate through the virtual environment (VE). Both players can interactively explore the VE while playing a mini-game together. We performed a user study to evaluate the game concepts. The results suggest that communication between the players—both spoken and technologically supported—is a challenging task, and seems especially difficult for the HMD player. Furthermore, this paper proposes a more specific exploration of other possible settings focusing on the communication of the players.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BHHNSLB16, title = {Evaluation of Two Leaning-Based Control Mechanisms in a Dragon Riding Interface}, author = {Pauline Bimberg and Philipp Heidenreich and Julia Hertel and Franziska Neu and Britta Schulte and Paul Lubos and Gerd Bruder}, editor = {Thies Pfeiffer and Julia Fröhlich and Rolf Kruse (Hg.)}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {109--116}, publisher = {Shaker Verlag}, abstract = {In this paper we present a dragon rider 3D flight interface which allows users to intuitively control the movement of a fantasy dragon in a virtual world by leaning their body in a seated position. 
We introduce two different approaches to measure the user's leaning state in the real world, one using a Wii Balance Board to measure the distribution of the user's weight on the seat and the other using a Kinect to track the user's upper body skeleton. We compared the two methods in an experiment focusing on the usability, sense of presence and efficiency in a 3D flight task in a virtual environment presented on a head-mounted display. While both approaches were considered as intuitive by our participants, our results indicate that tracking the user's upper body with a Kinect was preferred by the participants and estimated as more usable than via shifts in the user's weight.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{WAFNRVLB16, title = {Exploring Augmented Perception: A User Acceptance Study}, author = {Daniel Waller and Christina Albers and Philipp Feuerbach and Marta Nevermann and Marc Richter and Stephanie Vogt and Paul Lubos and Gerd Bruder}, editor = {Thies Pfeiffer and Julia Fröhlich and Rolf Kruse (Hg.)}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {37--44}, publisher = {Shaker Verlag}, abstract = {Research into augmented reality (AR) focused on augmented perception, i. e., changes to the visual perception of the environment, has not received as much attention as traditional AR applications focused on incorporating virtual objects or information into the user's view. This paper presents an assessment of the usefulness of vision-enhancing augmented reality (AR) for performing everyday tasks. Using virtual reality (VR) head-mounted displays we simulated different situations in which we augmented the visual perception of the user with heat vision, sound vision and night vision. Twenty participants were divided into two experimental and a control group. They were told to solve different everyday tasks using either natural vision or the different enhancement methods, which participants could switch between. We then observed how they performed in terms of vision switches and time needed. Participants took questionnaires before and after the simulation to assess simulator sickness, frustration levels and qualitative data. Our study shows that participants embraced the enhanced vision for solving everyday tasks and solved the given tasks significantly faster using the simulated augmented perception techniques. The results merit further research in implementing enhanced vision with AR technologies.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBS16a, title = {Illusion of Depth in Spatial Augmented Reality}, author = {Susanne Schmidt and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBS16a.pdf}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the IEEE VR Workshop on Perceptual and Cognitive Issues in AR (PERCAR)}, pages = {1--6}, abstract = {Spatial augmented reality (SAR) is an emerging paradigm that differs from its origin, the traditional augmented reality (AR), in many regards. While traditional AR is a well-studied field of research, the characteristic features of SAR and their implications on the perception of spatial augmented environments have not been analyzed so far. In this paper, we present one of the first studies, which investigates the perceived spatial relationships between a user and their SAR environment.
The results indicate that perceived depth of real-world objects can be manipulated by projecting illusions, such as color or blur effects, onto their surfaces. For the purpose of evaluating and comparing the illusions of interest, we developed a prototypic setup for conducting perceptual SAR experiments. Since this testing environment differs significantly from its counterparts in virtual and augmented reality, we also discuss potential challenges, which arise from the nature of SAR experiments.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{SB16, title = {Im Selbstversuch 24 Stunden in der virtuellen Realitaet}, author = {Frank Steinicke and Gerd Bruder}, year = {2016}, date = {2016-01-01}, journal = {Jahrbuch immersiver Medien}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{AFLFSBS16, title = {Inducing Body-Transfer Illusions in VR by Providing Brief Phases of Visual-Tactile Stimulation}, author = {Oscar Ariza and Jann Philipp Freiwald and Nadine Laage and Michaela Feist and Mariam Salloum and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/AFLFSBS16-optimized.pdf}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interaction (SUI)}, pages = {61--68}, abstract = {Current developments in the area of virtual reality (VR) allow numerous users to experience immersive virtual environments (VEs) in a broad range of application fields. In the same way, some research has shown novel advances in wearable devices to provide vibrotactile feedback which can be combined with low-cost technology for hand tracking and gestures recognition. The combination of these technologies can be used to investigate interesting psychological illusions. For instance, body-transfer illusions, such as the rubber-hand illusion or elongated-arm illusion, have shown that it is possible to give a person the persistent illusion of body transfer after only brief phases of synchronized visual-haptic stimulation. The motivation of this paper is to induce such perceptual illusions by combining VR, vibrotactile and tracking technologies, offering an interesting way to create new spatial interaction experiences centered on the senses of sight and touch. We present a technology framework that includes a pair of self-made gloves featuring vibrotactile feedback that can be synchronized with audio-visual stimulation in order to reproduce body-transfer illusions in VR. We present in detail the implementation of the framework and show that the proposed technology setup is able to induce the elongated-arm illusion providing automatic tactile stimuli, instead of the traditional approach based on manually synchronized stimulation.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBS16, title = {Scale Matters! 
Analysis of Dominant Scale Estimation in the Presence of Conflicting Cues in Multi-Scale Collaborative Virtual Environments}, author = {Eike Langbehn and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LBS16.pdf}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of IEEE Symposium on 3D User Interfaces (3DUI)}, pages = {211--220}, abstract = {Multi-scale collaborative virtual environments (MCVEs) provide an important platform for many 3D application domains as they allow several users to cooperate in a virtual environment (VE) at different scale levels, ranging from magnified detail views to minified overall views. However, in such MCVEs, the natural relations between a user's self-representation, i.e., her virtual body, and the environment in terms of size, scale, proportion, capabilities, or affordances are subject to change during the interaction. In this paper we describe how the type of the environment, virtual self-representation of our body, as well as presence of other avatars affects our estimation of dominant scale, i.e., the scale level relative to which we make spatial judgments, plan actions and interpret other users' actions in MCVEs. We present a pilot study, which highlights the problem domain, and two psychophysical experiments, in which we analyzed how the different factors in MCVEs affect the estimation of dominant scale and thus shape perception and action in MCVEs. Our results show an effect of the above-mentioned aspects on the estimation of dominant scale. In particular, our results show interpersonal differences as well as a group effect, which reveals that participants estimated the common scale level of a group of other avatars as dominant scale, even if the participant's own scale or the environment scale deviated from the other avatars' common scale. We discuss implications and guidelines for the development of MCVEs.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBS16a, title = {[POSTER] Subliminal Reorientation and Repositioning in Virtual Reality During Eye Blinks}, author = { Eike Langbehn and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LBS16a.pdf}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of ACM Symposium on Spatial User Interaction (Poster)}, pages = {213}, abstract = {Locomotion in Immersive Virtual Environments (IVEs) is one of the most basic interactions, while human walking is the most natural user-interface for this. Obviously, this technique is limited by the available physical space. Redirected Walking (RDW) wants to overcome this issue by subliminal redirection of the user inside the physical space. Traditional RDW algorithms need a circle with a radius of 22m to allow the user the exploration of an infinite virtual world. Because this is still too large to fit in a room-scale setup, we have to optimize detection thresholds and algorithms. Bolte et al. already examined reorientation and repositioning during saccades and showed that a subtle manipulation is possible. In this poster we describe how we investigated reorientation and repositioning of the user in the virtual world during eye blinks. Furthermore, we present an experimental setup for evaluating detection thresholds of reorientation and repositioning during eye blinks. 
And we indicate first impressions of the perception and the usability.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBAS16a, title = {Touching the Sphere: Leveraging Joint-Centered Kinespheres for Spatial User Interaction}, author = { Paul Lubos and Gerd Bruder and Oscar Ariza and Frank Steinicke}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of ACM Symposium on Spatial User Interaction (SUI)}, pages = {13--22}, abstract = {Designing spatial user interfaces for virtual reality (VR) applications that are intuitive, comfortable and easy to use while at the same time providing high task performance is a challenging task. This challenge is even harder to solve since perception and action in immersive virtual environments differ significantly from the real world, causing natural user interfaces to elicit a dissociation of perceptual and motor space as well as levels of discomfort and fatigue unknown in the real world. In this paper, we present and evaluate the novel method to leverage joint-centered kinespheres for interactive spatial applications. We introduce kinespheres within arm's reach that envelope the reachable space for each joint such as shoulder, elbow or wrist, thus defining 3D interactive volumes with the boundaries given by 2D manifolds. We present a Fitts' Law experiment in which we evaluated the spatial touch performance on the inside and on the boundary of the main joint-centered kinespheres. Moreover, we present a confirmatory experiment in which we compared joint-centered interaction with traditional spatial head-centered menus. Finally, we discuss the advantages and limitations of placing interactive graphical elements relative to joint positions and, in particular, on the boundaries of kinespheres.}, note = {Best Paper Award}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{DHHSSLB16, title = {Virtual Reality Flight Interfaces inspired by Iron Man}, author = { Christian Dibbern and Julian Hettwer and Josephine Hoeltermann and Daniel Sinn and Sukhpreet Singh and Paul Lubos and Gerd Bruder}, editor = {Thies Pfeiffer and Julia Fröhlich and Rolf Kruse (Hg.)}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {97--108}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBRBLS16, title = {Visual Blur in Immersive Virtual Environments: Does Depth of Field or Motion Blur Affect Distance and Speed Estimation?}, author = { Eike Langbehn and Benjamin Bolte and Tino Raupp and Gerd Bruder and Markus Lappe and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LBRBLS16-optimized.pdf}, year = {2016}, date = {2016-01-01}, booktitle = {Proceedings of ACM Symposium on Virtual Reality Software and Technology (VRST)}, pages = {241--250}, abstract = {It is known for decades that users tend to significantly underestimate or overestimate distances or speed in immersive virtual environments (IVEs) compared to corresponding judgments in the real world. Although several factors have been identified in the past that could explain small portions of this effect, the main causes of these perceptual discrepancies still remain elusive. One of the factors that has received less attention in the literature is the amount of blur presented in the visual imagery, for example, when using a head-mounted display (HMD). 
In this paper, we analyze the impact of the visual blur effects depth-of-field and motion blur in terms of their effects on distance and speed estimation in IVEs. We conducted three psychophysical experiments in which we compared distance or speed estimation between the real world and IVEs with different levels of depth-of-field or motion blur. Our results indicate that the amount of blur added to the visual stimuli had no noticeable influence on distance and speed estimation even when high magnitudes of blur were shown. Our findings suggest that the human perceptual system is highly capable of extracting depth and motion information regardless of blur, and implies that blur can likely be ruled out as the main cause of these misperception effects in IVEs.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{SBS16, title = {Who turned the clock? Effects of Manipulated Zeitgebers, Cognitive Load and Immersion on Time Estimation}, author = { Christian Schatzschneider and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBS16.pdf}, year = {2016}, date = {2016-01-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG), Special Issue on IEEE Virtual Reality (VR)}, pages = {1--9}, abstract = {Current virtual reality (VR) technologies have enormous potential to allow humans to experience computer-generated immersive virtual environments (IVEs). Many of these IVEs support near-natural audiovisual stimuli similar to the stimuli generated in our physical world. However, decades of VR research have been devoted to exploring and understanding differences between perception and action in such IVEs compared to real-world perception and action. Although significant differences have been revealed for spatiotemporal perception between IVEs and the physical world such as distance underestimation, there is still a scarcity of knowledge about the reasons for such perceptual discrepancies, in particular regarding the perception of temporal durations in IVEs. In this article, we explore the effects of manipulated zeitgebers, cognitive load and immersion on time estimation as yet unexplored factors of spatiotemporal perception in IVEs. We present an experiment in which we analyze human sensitivity to temporal durations while experiencing an immersive head-mounted display (HMD) environment. We found that manipulations of external zeitgebers caused by a natural or unnatural movement of the virtual sun had a significant effect on time judgments. Moreover, using the dual-task paradigm the results show that increased spatial and verbal cognitive load resulted in a significant shortening of judged time as well as an interaction with the external zeitgebers.
Finally, we discuss the implications for the design of near-natural computer-generated virtual worlds.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @article{Wehrer2015, title = {PhyloPen: Phylogenetic Tree Browsing Using a Pen and Touch Interface}, author = {Anthony Wehrer and Andrew Yee and Curtis Lisle and Charles Hughes}, url = {http://currents.plos.org/treeoflife/article/phylopen-phylogenetic-tree-browsing-using-a-pen-and-touch-interface/}, doi = {10.1371/currents.tol.d6d666469fc1942c665cb895b2305167}, year = {2015}, date = {2015-11-23}, urldate = {2017-01-31}, journal = {PLOS Currents Tree of Life}, edition = {Edition 1}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Kim2015ab, title = {Maintaining and Enhancing Human-Surrogate Presence in Augmented Reality}, author = {Kangsoo Kim and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Kim2015ab.pdf}, year = {2015}, date = {2015-09-01}, booktitle = {Proceedings of IEEE ISMAR 2015 Workshop on Human Perception and Psychology in Augmented Reality}, keywords = {A-gfw, A-kk, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Kim2015aa, title = {Expectancy Violations Related to a Virtual Human's Joint Gaze Behavior in Real-Virtual Human Interactions}, author = {Kangsoo Kim and Arjun Nagendran and Jeremy Bailenson and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Kim2015aa.pdf}, year = {2015}, date = {2015-05-01}, booktitle = {Proceedings of the 28th Annual Conference on Computer Animation and Social Agents (CASA 2015)}, pages = {5--8}, keywords = {A-gfw, A-kk, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Hochreiter2015aa, title = {Touch Sensing on Non-Parametric Rear-Projection Surfaces: A Physical-Virtual Head for Hands-On Healthcare Training}, author = {Jason Hochreiter and Salam Daher and Arjun Nagendran and Laura Gonzalez and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Hochreiter2015aa.pdf}, year = {2015}, date = {2015-03-01}, journal = {Proceedings of IEEE Virtual Reality 2015}, pages = {69--74}, address = {Arles, France}, keywords = {A-gfw, A-jh, A-sd, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @incollection{Hughes2015aa, title = {Applications of Avatar Mediated Interaction to Teaching, Training, Job Skills and Wellness}, author = {Charles Hughes and Arjun Nagendran and Lisa Dieker and Michael Hynes and Gregory Welch}, editor = {Guido Brunnett and Sabine Coquillart and Robert van Liere and Gregory Welch and Libor Váša}, url = {http://dx.doi.org/10.1007/978-3-319-17043-5_8 https://sreal.ucf.edu/wp-content/uploads/2017/02/Hughes2015aa.pdf}, doi = {10.1007/978-3-319-17043-5_8}, isbn = {978-3-319-17042-8}, year = {2015}, date = {2015-01-01}, booktitle = {Virtual Realities}, volume = {8844}, pages = {133-146}, publisher = {Springer International Publishing}, series = {Lecture Notes in Computer Science}, keywords = {A-ceh, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {incollection} } @incollection{Nagendran2015aa, title = {Technical Report: Exploring Human Surrogate Characteristics}, author = {Arjun Nagendran and Gregory Welch and Charles Hughes and Remo Pillat}, editor = {Guido Brunnett and Sabine Coquillart and Robert van Liere and Gregory Welch and Libor Váša}, url = {http://dx.doi.org/10.1007/978-3-319-17043-5_12
https://sreal.ucf.edu/wp-content/uploads/2017/02/Nagendran2015aa_red.pdf}, doi = {10.1007/978-3-319-17043-5_12}, isbn = {978-3-319-17042-8}, year = {2015}, date = {2015-01-01}, booktitle = {Virtual Realities}, volume = {8844}, pages = {215-228}, publisher = {Springer International Publishing}, series = {Lecture Notes in Computer Science}, keywords = {A-ceh, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {incollection} } @article{Vasquez2015aa, title = {Virtual Learning Environments for Students with Disabilities: A Review and Analysis of The Empirical Literature and Two Case Studies.}, author = {Eleazar Vasquez III and Arjun Nagendran and Gregory Welch and Matthew T. Marino and Darin E. Hughes and Aaron Koch and Lauren Delisio}, url = {https://login.ezproxy.net.ucf.edu/login?auth=shibb&url=http://search.ebscohost.com/login.aspx?direct=true&db=eft&AN=110556458&site=eds-live&scope=site https://sreal.ucf.edu/wp-content/uploads/2017/02/Vasquez2015aa.pdf}, issn = {87568705}, year = {2015}, date = {2015-01-01}, journal = {Rural Special Education Quarterly}, volume = {34}, number = {3}, pages = {26--32}, abstract = {Students with autism spectrum disorder (ASD) show varying levels of impairment in social skills situations. Interventions have been developed utilizing virtual environments (VEs) to teach and improve social skills. This article presents a systematic literature review of peer-reviewed journal articles focusing on social interventions in VEs involving K-12th grade students with ASD. This exhaustive analysis across four major online databases was guided by operational terms related to intervention type and K-12 students with ASD. The empirical search yielded a very narrow body of literature (n= 19) on the use of VEs as social skill interventions for students with ASD. Two case study examples of experiments exploring the use of VEs and students with ASD are presented to illustrate possible applications of this technology. 
}, keywords = {A-ceh, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Zhang2015aa, title = {Kalman Filters for Dynamic and Secure Smart Grid State Estimation}, author = {Jinghe Zhang and Greg Welch and Naren Ramakrishnan and Saifur Rahman}, url = {http://dx.doi.org/10.1007/s40903-015-0009-6 https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhang2015aa.pdf}, doi = {10.1007/s40903-015-0009-6}, issn = {2363-6912}, year = {2015}, date = {2015-01-01}, journal = {Intelligent Industrial Systems}, volume = {1}, number = {1}, pages = {29--36}, publisher = {Springer Singapore}, keywords = {A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Zhou2015aa, title = {Dynamic State Estimation of a Synchronous Machine using PMU Data: A Comparative Study}, author = {Ning Zhou and Da Meng and Zhenyu Huang and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhou2015aa.pdf}, doi = {10.1109/TSG.2014.2345698}, year = {2015}, date = {2015-01-01}, journal = {IEEE Transactions on Smart Grid}, volume = {6}, number = {1}, pages = {450--460}, keywords = {A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @book{Brunnett2015, title = {Virtual Realities : International Dagstuhl Seminar, Dagstuhl Castle, Germany, June 9-14, 2013, Revised selected papers}, editor = {Guido Brunnett and Sabine Coquillart and Robert van Liere and Gregory Welch and Libor Váša}, url = {http://link.springer.com/book/10.1007/978-3-319-17043-5}, doi = {10.1007/978-3-319-17043-5}, year = {2015}, date = {2015-01-01}, volume = {8844 2015}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, keywords = {A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {book} } @inbook{Dieker2015, title = {Mixed reality environments in teacher education: Development and future applications}, author = {Lisa Dieker and Benjamin Lignugaris-Kraft and Michael Hynes and Charles Hughes}, editor = {B. Collins and B. Ludlow}, year = {2015}, date = {2015-01-01}, booktitle = {Distance Education Delivery: Preparing Special Educators in and for Rural Areas}, publisher = {American Council for Rural Special Educators}, address = {Morgantown, WV}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{Barmaki2015, title = {A Case Study to Track Teacher Gestures and Performance in a Virtual Learning Environment}, author = { Roghayeh Barmaki and Charles E. Hughes}, url = {http://doi.acm.org/10.1145/2723576.2723650}, doi = {10.1145/2723576.2723650}, isbn = {978-1-4503-3417-4}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the Fifth International Conference on Learning Analytics And Knowledge}, pages = {420--421}, publisher = {ACM}, address = {Poughkeepsie, New York}, series = {LAK '15}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Barmaki2015b, title = {Providing Real-time Feedback for Student Teachers in a Virtual Rehearsal Environment}, author = { Roghayeh Barmaki and Charles E.
Hughes}, url = {http://doi.acm.org/10.1145/2818346.2830604}, doi = {10.1145/2818346.2830604}, isbn = {978-1-4503-3912-4}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the 2015 ACM on International Conference on Multimodal Interaction}, pages = {531--537}, publisher = {ACM}, address = {Seattle, Washington, USA}, series = {ICMI '15}, note = {Grand Challenge People’s Choice Award}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBS15, title = {A Layer-based 3D Virtual Environment for Architectural Collaboration}, author = { Susanne Schmidt and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBS15.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the EuroVR Conference}, pages = {1--6}, abstract = {Architectural design processes involve a variety of users with different levels of expertise such as architects, engineers, investors or end customers. An efficient process requires all involved parties to obtain a common understanding of the architectural models and problems to be discussed. This is an ambitious task as architects as well as other involved parties often need to work with two-dimensional (2D) floor plans. While these plans are meaningful and easy to interpret for professionals, ordinary users often face problems when deducing three-dimensional (3D) properties of a building. In this paper we address this problem by introducing an immersive virtual environment (VE) for collaborative exploration of virtual architectural models. We explore a layer-based visualization method, which stacks 2D floor plans in space providing a simple 3D impression without actually using a 3D model. Based on architectural work processes we developed a user interface including two registered representations of the same building. Our user interface allows an architect to specify a region of interest within a 3D overall view while other participants can follow his perspective in a second 2D view. In our setup the virtual building is displayed on two separate walls of an L-shaped projection system.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SDBS15, title = {A Mobile Interactive Mapping Application for Spatial Augmented Reality On The Fly}, author = { Susanne Schmidt and Silvan D{"a}hn and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SDBS15.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {1--9}, abstract = {Observing the evolution of virtual reality (VR) and augmented reality (AR) applications, a shift from professional users towards a broader public can be noticed in the last few years. This trend creates a demand for intuitive and easy-to-use tools that support people in utilizing VR or AR for their own projects. In this paper we present an open source tool, which we developed to allow non-professional users to project images onto the surface of real-world objects on the fly. We designed a web application that runs both on mobile and desktop devices and therefore can be used in a versatile and flexible manner. We conducted a user study to compare the usability and performance of the mobile and the desktop version of our tool. 
The results show that users prefer the mobile version for spatially challenging projection tasks.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{KBS15a, title = {[POSTER] Analyses of Spatial Ballistic Movements for Prediction of Targets in Reach to Grasp Tasks}, author = { Dennis Krupke and Gerd Bruder and Frank Steinicke}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of Eurographics Symposium on Virtual Environments (Poster)}, abstract = {Ballistic and correction phases in hand movement trajectories in reach to grasp tasks are recorded for further categorization and analysis in order to create a predictor of target objects. The results suggest that the index of difficulty (ID), according to Fitts’ Law, has no influence on the speed of reaching movements, but seems to determine the shape of the velocity versus time relation in the virtual experiment (VE). Closed-loop and open-loop conditions in ballistic aiming movements result in similar effects between maximum speed and distance of objects. The results provide important findings for interaction with 3D objects as well as human-robot collaboration, which allows for more robust and efficient interaction techniques in real-time scenarios.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{BLS15, title = {Cognitive Resource Demands of Redirected Walking}, author = { Gerd Bruder and Paul Lubos and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BLS15.pdf}, year = {2015}, date = {2015-01-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, number = {4}, pages = {539--544}, abstract = {Redirected walking allows users to walk through a large-scale immersive virtual environment (IVE) while physically remaining in a reasonably small workspace. Therefore, manipulations are applied to virtual camera motions so that the user's self-motion in the virtual world differs from movements in the real world. Previous work found that the human perceptual system tolerates a certain amount of inconsistency between proprioceptive, vestibular and visual sensation in IVEs, and even compensates for slight discrepancies with recalibrated motor commands. Experiments showed that users are not able to detect an inconsistency if their physical path is bent with a radius of at least 22 meters during virtual straightforward movements. If redirected walking is applied in a smaller workspace, manipulations become noticeable, but users are still able to move through a potentially infinitely large virtual world by walking. For this semi-natural form of locomotion, the question arises if such manipulations impose cognitive demands on the user, which may compete with other tasks in IVEs for finite cognitive resources. In this article we present an experiment in which we analyze the mutual influence between redirected walking and verbal as well as spatial working memory tasks using a dual-tasking method. The results show an influence of redirected walking on verbal as well as spatial working memory tasks, and we also found an effect of cognitive tasks on walking behavior. 
We discuss the implications and provide guidelines for using redirected walking in virtual reality laboratories.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BAOL15, title = {Distance Estimation in Large Immersive Projection Systems, Revisited}, author = { Gerd Bruder and Fernando Argelaguet Sanz and Anne-Héléne Olivier and Anatole Lécuyer}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BAOL15.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {27--32}, abstract = {When walking within an immersive projection environment, accommodation distance, parallax and angular resolution vary according to the distance between the user and the projection walls which can influence spatial perception. As CAVE-like virtual environments get bigger, accurate spatial perception within the projection setup becomes increasingly important for application domains that require the user to be able to naturally explore a virtual environment by moving through the physical interaction space. In this paper we describe an experiment which analyzes how distance estimation is biased when the distance to the screen and parallax vary. The experiment was conducted in a large immersive projection setup with up to ten meter interaction space. The results showed that both the screen distance and parallax have a strong asymmetric effect on distance judgments. We found an increased distance underestimation for positive parallax conditions. In contrast, we found less distance overestimation for negative and zero parallax conditions. We conclude the paper discussing the results with view on future large immersive projection environments.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LEGVBS15, title = {Evaluation of an Omnidirectional Walking-in-Place User Interface with Virtual Locomotion Speed Scaled by Forward Leaning Angle}, author = { Eike Langbehn and Tobias Eichler and Sobin Ghose and Kai {von Luck} and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LEGVBS15.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {149--160}, abstract = {Virtual locomotion is an enabling ability for many tasks in virtual environments (VEs) and denotes the most common form of interaction with VEs. In this paper we present a novel omnidirectional walking-in-place (WIP) locomotion system, which we designed to work in small laboratory environments and is based entirely on consumer hardware. We present our hardware and software solution to 360 degrees omnidirectional tracking based on multiple Kinects and an Oculus Rift head-mounted display (HMD). Using this novel setup we improved on the related work by evaluating leaning as a novel parameter of WIP interfaces. Inspired by observations of changing leaning angles during fast or slow locomotor movements in the real world, we present the Leaning-Amplified-Speed Walking-in-Place (LAS-WIP) user interface in this paper. 
We present the results of an experiment in which we show that leaning angle can have a positive effect on subjective estimates of self-motion perception and usability, which provides novel vistas for future research.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBS15, title = {Evaluation von Buttons im Kontext des Gestaltungsstils Flat Design}, author = { Malte L{\"u}cken and Gerd Bruder and Frank Steinicke}, year = {2015}, date = {2015-01-01}, booktitle = {Mensch und Computer 2015}, pages = {1--4}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LABDSK15, title = {HoverSpace: Analyses of the Perceived Spatial Affordances of Hover Interaction Above Tabletop Surfaces}, author = { Paul Lubos and Oscar Ariza and Gerd Bruder and Florian Daiber and Frank Steinicke and Antonio Krüger}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LABDSK15.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Human-Computer Interaction - INTERACT 2015}, volume = {9298}, pages = {259--277}, publisher = {Springer}, abstract = {Recent developments in the area of stereoscopic displays and tracking technologies have paved the way to combine touch interaction on interactive surfaces with spatial interaction above the surface of a stereoscopic display. This holistic design space supports novel affordances and user experiences during touch interaction, but also induces challenges to the interaction design. In this paper we introduce the concept of hover interaction for such setups. To this end, we analyze the non-visual volume above a virtual object, which is perceived as the corresponding hover space for that object. The results show that the users' perceptions of hover spaces can be categorized into two groups. Users assume that the shape of the hover space is either extruded and scaled towards their head, or along the normal vector of the interactive surface. We provide a corresponding model to determine the shapes of these hover spaces, and confirm the findings in a practical application. Finally, we discuss important implications for the development of future touch-sensitive interfaces.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBS15b, title = {[POSTER] Moving Towards Natural Interaction Between Multiscale Avatars in Multi-User Virtual Environments}, author = { Eike Langbehn and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LBS15b.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Eurographics Symposium on Virtual Environments (Poster)}, pages = {1--2}, abstract = {Multiscale collaborative virtual environments (MCVEs) are interesting for many application domains as they allow multiple users to interact with virtual worlds and themselves at different scales. However, natural interaction in such environments is often quite difficult, since the natural relations between humans and the environment in terms of body size, capabilities, affordances, personal space and interpersonal space are changed. In this poster we describe this phenomenon and present an experimental setup with multiscale avatars in a shared virtual world that supports full-body awareness.
We indicate first impressions of the perception of one’s size relative to the scale of the environment and the scale of others.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{KLBSZ15, title = {Natural 3D Interaction Techniques for Locomotion with Modular Robots}, author = {Dennis Krupke and Paul Lubos and Gerd Bruder and Frank Steinicke and Jianwei Zhang}, year = {2015}, date = {2015-01-01}, booktitle = {Mensch und Computer 2015}, pages = {1--10}, abstract = {Defining 3D movements of modular robots is a challenging task, which is usually addressed with computationally expensive algorithms that aim to create self-propelling locomotion. So far only few user interfaces exist which allow a user to naturally interact with a modular robot in real time. In this paper we present two approaches for 3D user interfaces for intuitive definition of 3D movements of a modular chain-like robot in the scope of an iterative design process. We present a comparative evaluation of the techniques, which shows that they can provide intuitive human-robot interaction via remote control for real-time guidance of modular robots to move through heavy terrains and pass obstacles. In particular, our results show that steering a robot's movements via rotational hand movements has benefits for challenging movement tasks compared to translational hand movements. We discuss the results and present lessons learned for steering user interfaces for modular robots.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BS15, title = {Perceptually-inspired and (Super-)Natural Interaction}, author = {Gerd Bruder and Frank Steinicke}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of IEEE Virtual Reality Lab Presentations (VR)}, pages = {1--2}, abstract = {The Human-Computer Interaction (HCI) research group explores perceptually-inspired and (super-)natural forms of interaction to seamlessly couple the space where the flat two-dimensional (2D) digital world meets the three dimensions we live in. The mission of the HCI group is to develop user interfaces, which allow users to intuitively experience and interact with three-dimensional (3D) virtual environments (VEs). In particular, research focuses on different modalities and forms of interaction in immersive virtual environments (IVEs) to support (super-)natural walking, touching, seeing and being, which are addressed in the scope of different research projects funded by the German Research Foundation.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{ALSB15, title = {Ring-shaped Haptic Device with Vibrotactile Feedback Patterns to Support Natural Spatial Interaction}, author = { Oscar Ariza and Paul Lubos and Frank Steinicke and Gerd Bruder}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/ALSB15.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of the ICAT-EGVE (International Conference on Artificial Reality and Telexistence and Eurographics Symposium on Virtual Environments) 2015}, pages = {175--181}, abstract = {Haptic feedback devices can be used to improve usability, performance and cognition in immersive virtual environments (IVEs) and have the potential to significantly improve the user’s virtual reality (VR) experience during natural interaction. 
However, there are only a few affordable and reliable haptic devices that have a lightweight, unencumbering, simple and versatile form factor which does not prevent natural interaction with other objects or devices at the same time. In this paper we present such a ring-shaped wireless haptic feedback device, and we describe different vibrotactile signal patterns which can be used to provide proximity-based cues during 3D interaction with virtual objects. We present a usability study in which we evaluated the device and feedback patterns.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{AOBPL15, title = {Virtual Proxemics: Locomotion in the Presence of Obstacles in Large Immersive Projection Environments}, author = { Fernando Argelaguet Sanz and Anne-Hélène Olivier and Gerd Bruder and Julien Pettré and Anatole Lécuyer}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/AOBPL15.pdf}, year = {2015}, date = {2015-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {75--80}, abstract = {In this paper, we investigate obstacle avoidance behavior during real walking in a large immersive projection setup. We analyze the walking behavior of users when avoiding real and virtual static obstacles. In order to generalize our study, we consider both anthropomorphic and inanimate objects, each having a virtual and a real counterpart. The results showed that users exhibit different locomotion behaviors in the presence of real and virtual obstacles, and in the presence of anthropomorphic and inanimate objects. More precisely, the results showed a decrease of walking speed as well as an increase of the clearance distance (i.e., the minimal distance between the walker and the obstacle) when facing virtual obstacles compared to real ones. Moreover, our results suggest that users act differently due to their perception of the obstacle: users keep more distance when the obstacle is anthropomorphic compared to an inanimate object and when the anthropomorphic obstacle is oriented in profile compared to a frontal position. We discuss implications for future large shared immersive projection spaces.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{LBS15a, title = {Influence of Comfort on 3D Selection Task Performance in Immersive Desktop Setups}, author = { Paul Lubos and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LBS15a.pdf}, year = {2015}, date = {2015-01-01}, journal = {Journal of Virtual Reality and Broadcasting (JVRB)}, volume = {12}, number = {2}, pages = {1--15}, abstract = {Immersive virtual environments (IVEs) have the potential to afford natural interaction in the three-dimensional (3D) space around a user. However, interaction performance in 3D mid-air is often reduced and depends on a variety of ergonomic factors, the user's endurance, muscular strength, as well as fitness. In particular, in contrast to traditional desktop-based setups, users often cannot rest their arms in a comfortable pose during the interaction. In this article we analyze the impact of comfort on 3D selection tasks in an immersive desktop setup. First, in a pre-study we identified how comfortable or uncomfortable specific interaction positions and poses are for users who are standing upright.
Then, we investigated differences in 3D selection task performance when users interact with their hands in a comfortable or uncomfortable body pose, while sitting on a chair in front of a table while the VE was displayed on a head-mounted display (HMD). We conducted a Fitts' Law experiment to evaluate selection performance in different poses. The results suggest that users achieve a significantly higher performance in a comfortable pose when they rest their elbow on the table.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @patent{Gilson2014, title = {Methods for Electronic Directionality of Deep-Brain Stimulation}, author = {Richard Gilson and Greg Welch and Nizam Razack}, url = {https://patents.google.com/patent/US8849408B1/en?oq=US+%238%2c849%2c408 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=8849408.PN.&OS=PN/8849408&RS=PN/8849408}, year = {2014}, date = {2014-09-30}, number = {US 8849408B1}, location = {US}, abstract = {Methods, systems and devices to provide correction parameters for implanted electrodes by applying a cathode pulse to a bilateral implanted electrode while providing a synchronized anode on the opposite electrode. The electrical field can be “shaped” over space and time to reach more of the targeted area by selecting various combinations of active contacts. The cathode lead directs the electrical field to the target and the placement and number of anode contacts activated determines the electric field path and rate of dissipation based on vertical and horizontal distance and timing. The correction parameter can be applied to anode and cathode contacts on a single implanted lead. Each lead can have plural anode and cathode contacts each independently controllable. Active anodes and cathodes are statically or dynamically selected to generate a shaped electric field to reach the target.}, note = {Filed: 2013-01-04}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {patent} } @article{Zheng2014aa, title = {Pixel-Wise Closed-Loop Registration in Video-Based Augmented Reality}, author = {Feng Zheng and Dieter Schmalstieg and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zheng2014aa.pdf}, year = {2014}, date = {2014-09-01}, journal = {Mixed and Augmented Reality (ISMAR), 2014 IEEE International Symposium on}, keywords = {A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @inbook{Lindgren2014, title = {Virtual environments as a tool for conceptual learning}, author = {Robb Lindgren and J. Michael Moshell and Charles Hughes}, editor = {Kelly S. Hale and Kay M. 
Stanney}, doi = {10.1201/b17360-48}, isbn = {978-1-4665-1184-2}, year = {2014}, date = {2014-08-13}, booktitle = {Handbook of Virtual Environments: Design, Implementation, and Applications}, pages = {1043-1056}, publisher = {CRC Press}, edition = {2nd Edition}, chapter = {40}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @conference{Abich2014ab, title = {A Preliminary Evaluation of Human Surrogate Interaction}, author = {Julian Abich IV and Gerald Matthews and Lauren Reinerman-Jones and Gregory Welch and Stephanie Lackey and Charles Hughes and Arjun Nagendran}, year = {2014}, date = {2014-07-22}, booktitle = {HCI International 2014 (HCII2014)}, address = {Crete, Greece}, keywords = {A-ceh, A-gfw, SREAL}, pubstate = {published}, tppubtype = {conference} } @article{Sonnenwald2014aa, title = {Illuminating collaboration in emergency health care situations: paramedic-physician collaboration and 3D telepresence technology}, author = {Diane Sonnenwald and Hanna Söderholm and Gregory Welch and Bruce Cairns and James Manning and Henry Fuchs}, url = {http://InformationR.net/ir/19-2/paper618.html https://sreal.ucf.edu/wp-content/uploads/2017/02/Sonnenwald2014aa.pdf}, year = {2014}, date = {2014-06-01}, journal = {Information Research}, volume = {19}, number = {2}, note = {Available at http://InformationR.net/ir/19-2/paper618.html}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Menozzi2014aa, title = {Development of Vision-aided Navigation for a Wearable Outdoor Augmented Reality System}, author = {Alberico Menozzi and Brian Clipp and Eric Wenger and Jared Heinly and Herman Towles and Jan-Michael Frahm and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Menozzi2014aa.pdf}, year = {2014}, date = {2014-05-01}, booktitle = {Proceedings of the IEEE/ION Position Location and Navigation Symposium}, pages = {760--772}, address = {Monterey, CA, USA}, organization = {IEEE/The Institute of Navigation}, keywords = {A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Dieker2014, title = {Learning from Virtual Students}, author = {Lisa Dieker and Carrie Straub and Charles Hughes and Michael Hynes and Stacey Hardin}, url = {http://www.ascd.org/publications/educational-leadership/may14/vol71/num08/Learning-from-Virtual-Students.aspx}, year = {2014}, date = {2014-05-01}, journal = {Educational Leadership: Professional Learning: Reimagined}, volume = {71}, number = {8}, pages = {54-58}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Zhang2014aa, title = {A Two-Stage Kalman Filter Approach for Robust and Real-Time Power System State Estimation}, author = {Jinghe Zhang and Greg Welch and Gary Bishop and Zhenyu Huang}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhang2014aa.pdf}, doi = {10.1109/TSTE.2013.2280246}, issn = {1949-3029}, year = {2014}, date = {2014-04-01}, journal = {IEEE Transactions on Sustainable Energy}, volume = {5}, number = {2}, pages = {629-636}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Dieker2014b, title = {The Potential of Simulated Environments in Teacher Education: Current and Future Possibilities}, author = {Lisa Dieker and Jacqueline Rodriguez and Benjamin Lignugaris-Kraft and Michael Hynes and Charles Hughes}, doi = {10.1177/0888406413512683}, year = {2014}, date = {2014-01-05}, journal = {Teacher Education and Special Education}, volume =
{37}, number = {1}, pages = {21-33}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @incollection{Abich2014aa, title = {Good Enough Yet? A Preliminary Evaluation of Human-Surrogate Interaction}, author = {Julian Abich IV and Lauren Reinerman-Jones and Gerald Matthews and Gregory Welch and Stephanie Lackey and Charles Hughes and Arjun Nagendran }, editor = {Randall Shumaker and Stephanie Lackey}, url = {http://dx.doi.org/10.1007/978-3-319-07458-0_23 https://sreal.ucf.edu/wp-content/uploads/2017/02/Abich2014aa.pdf}, doi = {10.1007/978-3-319-07458-0_23}, isbn = {978-3-319-07457-3}, year = {2014}, date = {2014-01-01}, booktitle = {Virtual, Augmented and Mixed Reality. Designing and Developing Virtual and Augmented Environments}, volume = {8525}, pages = {239--250}, publisher = {Springer International Publishing}, series = {Lecture Notes in Computer Science}, keywords = {A-ceh, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {incollection} } @article{Ilie2014aa, title = {Online Control of Active Camera Networks for Computer Vision Tasks}, author = {Adrian Ilie and Greg Welch}, url = {http://doi.acm.org/10.1145/2530283 https://sreal.ucf.edu/wp-content/uploads/2017/02/Ilie2014aa_red.pdf}, doi = {10.1145/2530283}, issn = {1550-4859}, year = {2014}, date = {2014-01-01}, journal = {ACM Trans. Sen. Netw.}, volume = {10}, number = {2}, pages = {25:1--25:40}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Nagendran2014aa, title = {A Unified Framework for Individualized Avatar-Based Interactions}, author = {Arjun Nagendran and Remo Pillat and Adam Kavanaugh and Greg Welch and Charles Hughes}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Nagendran2014aa.pdf}, doi = {http://dx.doi.org/10.1162/105474602760204327}, issn = {1054-7460}, year = {2014}, date = {2014-01-01}, journal = {Presence: Teleoperators and Virtual Environments}, volume = {23}, number = {2}, publisher = {MIT Press}, address = {Cambridge, MA, USA}, keywords = {A-ceh, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Welch2014aa, title = {Mastering the Human Element of Immersive Training}, author = {Gregory Welch and Arjun Nagendran and Jeremy Bailenson and Charles Hughes and Pete Muller and Peter Squire}, url = {http://futureforce.navylive.dodlive.mil/files/2014/10/FF-Fall-2014_web.pdf https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2014aa.pdf}, year = {2014}, date = {2014-01-01}, journal = {Naval Science and Technology Future Force}, volume = {Fall 2014}, pages = {10--13}, keywords = {A-ceh, A-gfw, F-ONR, P-HSI, SREAL}, pubstate = {published}, tppubtype = {article} } @incollection{Walters2014, title = {Emerging from the Shadows: Japan Pavilion Promotional Materials at the 1964/65 New York World's Fair}, author = { Lori C. 
Walters}, editor = {Laura Hollengreen and Celia Pearce and Rebecca Rouse and Bobby Schweizer}, url = {http://dl.acm.org/citation.cfm?id=2811094.2811143}, isbn = {978-1-312-11587-3}, year = {2014}, date = {2014-01-01}, booktitle = {Meet Me at the Fair}, pages = {467--472}, publisher = {ETC Press}, address = {Pittsburgh, PA, USA}, chapter = {Chapter 8.2}, keywords = {}, pubstate = {published}, tppubtype = {incollection} } @inbook{Erbiceanu2014, title = {Synthesizing Virtual Character Behaviors from Interactive Digital Puppetry}, author = {Elena Erbiceanu and Daniel Mapes and Charles Hughes}, editor = {Joshua Tanenbaum and Magy Seif El-Nasr and Michael Nixon}, url = {http://press.etc.cmu.edu/files/Nonverbal-Communication-Tanenbaum_Seif-El-Nasr_Nixon-web.pdf http://press.etc.cmu.edu/content/nonverbal-communication-virtual-worlds-understanding-and-designing-expressive-characters }, isbn = {978-1-304-81204-9}, year = {2014}, date = {2014-01-01}, urldate = {2016-12-05}, booktitle = {Nonverbal Communication in Virtual Worlds}, pages = {269-287}, publisher = {ETC Press}, chapter = {16}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @article{Nagendran2014, title = {Design, control, and performance of the weed 6 wheel robot in the UK MOD grand challenge}, author = {Arjun Nagendran and William Crowther and Martin Turner and Alexander Lanzon and Robert Richardson}, url = {http://www.tandfonline.com/doi/abs/10.1080/01691864.2013.865298}, doi = {10.1080/01691864.2013.865298}, year = {2014}, date = {2014-01-01}, journal = {Advanced Robotics}, volume = {28}, number = {4}, pages = {203-218}, keywords = {SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{SB14, title = {A Self-Experimentation Report about Long-Term Use of Fully-Immersive Technology}, author = { Frank Steinicke and Gerd Bruder}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SB14-optimized.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of ACM Symposium on Spatial User Interaction (SUI)}, pages = {66--69}, abstract = {Virtual and digital worlds have become an essential part of our daily life, and many activities that we used to perform in the real world, such as communication, e-commerce, or games, have been transferred to the virtual world nowadays. This transition has been addressed many times by science fiction literature and cinematographic works, which often show dystopic visions in which humans live their lives in a virtual reality (VR)-based setup, while they are immersed into a virtual or remote location by means of avatars or surrogates. In order to gain a better understanding of how living in such a virtual environment (VE) would impact human beings, we conducted a self-experiment in which we exposed a single participant to an immersive VR setup for 24 hours (divided into repeated sessions of two hours of VR exposure followed by ten-minute breaks), which is to our knowledge the longest documented use of an immersive VE so far.
We measured different metrics to analyze how human perception, behavior, cognition, and the motor system change over time in a fully isolated virtual world.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBS14, title = {Analysis of Direct Selection in Head-Mounted Display Environments}, author = { Paul Lubos and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LBS14.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of IEEE Symposium on 3D User Interfaces (3DUI)}, pages = {11--18}, abstract = {The design of 3D user interfaces (3DUIs) for immersive head-mounted display (HMD) environments is an inherently difficult task. The fact that haptic feedback is usually absent and visual body feedback is missing hinders efficient direct interaction with virtual objects. Moreover, perceptual conflicts, such as double vision and space misperception, as well as the well-known vergence-accommodation mismatch further complicate the interaction, in particular with virtual objects floating in the virtual environment (VE). However, the potential benefits of direct and natural interaction offered by immersive virtual environments (IVEs) encourage research in the field to create more efficient selection methods. Utilizing a Fitts' Law experiment, we analyzed the 3D direct selection of objects in the virtual 3D space as they might occur for 3D menus or floating objects in space. We examined the direct interaction space in front of the user and divided it into a set of interaction regions for which we compared different levels of selection difficulty. Our results indicate that selection errors are highest along the view axis, lower along the motion axis, and marginal along the orthogonal plane. Based on these results we suggest some guidelines for the design of direct selection techniques in IVEs.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{LBS14b, title = {Are Four Hands better than Two? Bimanual Interaction for Quadmanual User Interfaces}, author = { Paul Lubos and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/LBS14b.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of ACM Symposium on Spatial User Interaction (SUI)}, pages = {123--126}, abstract = {The design of spatial user interaction for immersive virtual environments (IVEs) is an inherently difficult task. Missing haptic feedback and spatial misperception hinder efficient direct interaction with virtual objects. Moreover, interaction performance depends on a variety of ergonomic factors, such as the user's endurance, muscular strength, as well as fitness. However, the potential benefits of direct and natural interaction offered by IVEs encourage research to create more efficient interaction methods. We suggest a novel way of 3D interaction by utilizing the fact that for many tasks, bimanual interaction shows benefits over one-handed interaction in a confined interaction space. In this paper we push this idea even further and introduce quadmanual user interfaces (QUIs) with two additional virtual hands. These magic hands allow the user to keep their arms in a comfortable position yet still interact with multiple virtual interaction spaces.
To analyze our approach, we conducted a performance experiment inspired by a Fitts' Law selection task, investigating the feasibility of our approach for the natural interaction with 3D objects in virtual space.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{ZBS14, title = {Comparison of 2D and 3D GUI Widgets for Stereoscopic Multitouch Setups}, author = { David Zilch and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/ZBS14.pdf}, year = {2014}, date = {2014-01-01}, journal = {Journal of Virtual Reality and Broadcasting (JVRB)}, volume = {11}, number = {7}, pages = {1--10}, abstract = {Recent developments in the area of interactive entertainment have suggested combining stereoscopic visualization with multi-touch displays, which has the potential to open up new vistas for natural interaction with interactive three-dimensional (3D) applications. However, the question arises of how the user interfaces for system control in such 3D setups should be designed in order to provide an effective user experience. In this article we introduce 3D GUI widgets for interaction with stereoscopic touch displays. The design of the widgets was inspired by skeuomorphism and affordances in such a way that the user should be able to operate the virtual objects in the same way as their real-world equivalents. We evaluated the developed widgets and compared them with their 2D counterparts in the scope of an example application in order to analyze the usability of and user behavior with the widgets. The results reveal differences in user behavior with and without stereoscopic display during touch interaction, and show that the developed 2D as well as 3D GUI widgets can be used effectively in different applications.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BSN14, title = {[POSTER] Immersive Point Cloud Virtual Environments}, author = { Gerd Bruder and Frank Steinicke and Andreas Nüchter}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSN14.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI)}, pages = {161--162}, abstract = {Today's three-dimensional (3D) virtual environments (VEs) are usually based on textured polygonal 3D models, which represent the appearance and geometry of the virtual world. However, some application domains require other graphical paradigms, which are currently not adequately addressed by 3D user interfaces. We introduce a novel approach for a technical human-robot telepresence setup that allows a human observer to explore a VE, which is a 3D reconstruction of the real world based on point clouds. Such point cloud virtual environments (PCVEs) represent the external environment, and are usually acquired by 3D scanners. We present an application scenario, in which a mobile robot captures 3D scans of a terrestrial environment, which are automatically registered to a coherent PCVE. This virtual 3D reconstruction is displayed in an immersive virtual environment (IVE) in which a user can explore the PCVE.
We explain and describe the technical setup, which opens up new vistas of presenting a VE as points rather than a polygonal representation.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BLS14, title = {[POSTER] Safe-&-Round: Bringing Redirected Walking to Small Virtual Reality Laboratories}, author = { Gerd Bruder and Paul Lubos and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BLS14.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interaction (SUI)}, pages = {154-154}, abstract = {Walking is usually considered the most natural form of self-motion in a virtual environment (VE). However, the confined physical workspace of typical virtual reality (VR) labs often prevents natural exploration of larger VEs. Redirected walking has been introduced as a potential solution to this restriction, but corresponding techniques often induce enormous manipulations if the workspace is considerably small, and therefore lack a natural experience. In this poster we propose the Safe-&-Round user interface, which supports natural walking in a potentially infinite virtual scene while confined to a considerably restricted physical workspace. This virtual locomotion technique relies on a safety volume, which is displayed as a semi-transparent half-capsule, inside which the user can walk without manipulations caused by redirected walking.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BS14, title = {[POSTER] Time Perception during Walking in Virtual Environments}, author = { Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BS14.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {67--68}, abstract = {A large body of literature has analyzed differences between perception in the real world and virtual environments (VEs) in terms of space, distance and speed perception. So far, to our knowledge, no empirical data has been collected on time misperception in immersive VEs. However, there is evidence that time perception can deviate from veridical judgments, for instance, due to visual or auditory stimulation related to motion misperception. In this work we evaluate time perception during walking motions with a pilot study in an immersive head-mounted display (HMD) environment. Significant differences between time judgments in the real and virtual environment could not be observed.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BS14a, title = {Threefolded Motion Perception During Immersive Walkthroughs}, author = { Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BS14a.pdf}, year = {2014}, date = {2014-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)}, pages = {177--185}, abstract = {Locomotion is one of the most fundamental processes in the real world, and its consideration in immersive virtual environments (IVEs) is of major importance for many application domains requiring immersive walkthroughs. From a simple physics perspective, such self-motion can be defined by the three components speed, distance, and time.
Determining motions in the frame of reference of a human observer imposes a significant challenge to the perceptual processes in the human brain, and the resulting speed, distance, and time percepts are not always veridical. In previous work in the area of IVEs, these components were evaluated in separate experiments, i.e., using largely different hardware, software and protocols. In this paper we analyze the perception of the three components of locomotion during immersive walkthroughs using the same setup and similar protocols. We conducted experiments in an Oculus Rift head-mounted display (HMD) environment which showed that subjects largely underestimated virtual distances, slightly underestimated virtual speed, and we observed that subjects slightly overestimated elapsed time.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{Nagendran2013aa, title = {AMITIES: Avatar-Mediated Interactive Training and Individualized Experience System}, author = {Arjun Nagendran and Remo Pillat and Adam Kavanaugh and Greg Welch and Charles Hughes}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Nagendran2013aa.pdf}, year = {2013}, date = {2013-10-01}, journal = {Proceedings of The 19th ACM Symposium on Virtual Reality Software and Technology (VRST2013)}, keywords = {A-ceh, A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Harmon2013, title = {Arbor: Comparative Analysis Workflows for the Tree of Life}, author = {Luke Harmon and Jeffery Baumes and Charles Hughes and Jorge Soberon and Chelsea Specht and Wesley Turner and Curtis Lisle and Robert Thacker}, url = {http://currents.plos.org/treeoflife/article/arbor-comparative-analysis-workflows-for-the-tree-of-life/}, doi = {10.1371/currents.tol.099161de5eabdee073fd3d21a44518dc}, year = {2013}, date = {2013-06-21}, urldate = {2017-01-31}, journal = {PLOS Currents Tree of Life}, edition = {Edition 1}, note = {last modified:2013 Jun 21}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Braeger2013, title = {Photometric display calibration for embedded MR environments}, author = {Steven Braeger and Yiyan Xiong and Charles Hughes}, doi = {10.1109/VR.2013.6549399 }, issn = {1087-8270}, year = {2013}, date = {2013-03-18}, booktitle = {2013 IEEE Virtual Reality (VR)}, pages = {135-136}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Zheng2013aa, title = {A General Approach for Closed-Loop Registration in AR}, author = {Feng Zheng and Ryan Schubert and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zheng2013aa.pdf}, year = {2013}, date = {2013-03-01}, booktitle = {Proceedings of IEEE Virtual Reality 2013}, address = {Orlando, FL, USA}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @incollection{Ilie2013aa, title = {Automated Camera Selection and Control for Better Training Support}, author = {Adrian Ilie and Greg Welch}, editor = {Dylan Schmorrow and Cali M. 
Fidopiastis}, url = {http://dx.doi.org/10.1007/978-3-642-39454-6_6 https://sreal.ucf.edu/wp-content/uploads/2017/02/Ilie2013aa.pdf}, doi = {10.1007/978-3-642-39454-6_6}, isbn = {978-3-642-39453-9}, year = {2013}, date = {2013-01-01}, booktitle = {Foundations of Augmented Cognition}, volume = {8027}, pages = {50-59}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {incollection} } @article{Sadagic2013aa, title = {Smart instrumented training ranges: bringing automated system solutions to support critical domain needs}, author = {Amela Sadagic and Mathias Kölsch and Greg Welch and Chumki Basu and Chris Darken and Juan P. Wachs and Henry Fuchs and Herman Towles and Neil Rowe and Jan-Michael Frahm and Li Guan and Rakesh Kumar and Hui Cheng}, url = {http://dms.sagepub.com/content/10/3/327.abstract https://sreal.ucf.edu/wp-content/uploads/2017/02/Sadagic2013aa.pdf}, doi = {10.1177/1548512912472942}, year = {2013}, date = {2013-01-01}, journal = {The Journal of Defense Modeling and Simulation: Applications, Methodology, Technology}, volume = {10}, number = {3}, pages = {327-342}, abstract = {The training objective for urban warfare includes acquisition and perfection of a set of diverse skills in support of kinetic and non-kinetic operations. The US Marines (USMC) employ long-duration acted scenarios with verbal training feedback provided sporadically throughout the training session and at the end in a form of an after-action review (AAR). The inherent characteristic of training ranges for urban warfare is that they are the environments with a high level of physical occlusion, which causes many performances not to be seen by a group of instructors who oversee the training. We describe BASE-IT (Behavioral Analysis and Synthesis for Intelligent Training), a system in development that aims to automate capture of training data and their analysis, performance evaluation, and AAR report generation. The goal of this effort is to greatly increase the amount of observed behavior and improve the quality of the AAR. The system observes training with stationary cameras and personal tracking devices. It then analyzes movement and body postures, measures individual and squad-level performance, and compares it to standards and levels of performance expected in given situations. An interactive visualization component delivers live views augmented with real-time analytics and alerts; it also generates a personalized AAR review in a three-dimensional virtual or mixed reality environment, indexed by automatically extracted salient events and accompanied by summary statistics of unit performance. The approaches presented in the system have the potential to radically change the analysis and performance assessment on physical training ranges and ultimately this type of training itself.}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{brunnett2013, title = {Virtual Realities (Dagstuhl Seminar 13241)}, author = { Guido Brunnett and Sabine Coquillart and Robert Van Liere and Gregory F. 
Welch}, url = {http://drops.dagstuhl.de/volltexte/2013/4257/pdf/dagrep_v003_i006_p038_s13241.pdf}, year = {2013}, date = {2013-01-01}, journal = {Dagstuhl Reports}, volume = {3}, number = {6}, pages = {38-66}, publisher = {Schloss Dagstuhl-Leibniz-Zentrum fuer Informatik}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Hayes2013, title = {Ludic Learning: Exploration of TLE TeachLivE™ and Effective Teacher Training}, author = {Aleshia Hayes and Carrie Straub and Lisa Dieker and Charles Hughes and Michael Hynes}, doi = {10.4018/jgcms.2013040102}, year = {2013}, date = {2013-01-01}, journal = {International Journal of Gaming and Computer-Mediated Simulations (IJGCMS)}, volume = {5}, number = {2}, pages = {20-33}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Norris2013, title = {Randomized trial of a peer resistance skill-building game for Hispanic early adolescent girls}, author = {Anne E. Norris and Charles Hughes and Michael Hecht and Nilda Peragallo and David Nickerson}, url = {http://journals.lww.com/nursingresearchonline/Fulltext/2013/01000/Randomized_Trial_of_a_Peer_Resistance.6.aspx}, doi = {10.1097/NNR.0b013e318276138f}, year = {2013}, date = {2013-01-01}, journal = {Nursing Research}, volume = {62}, number = {1}, pages = {25-35}, edition = {2013 Edition}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Hayes2013b, title = {Perceived Presence's Role on Learning Outcomes in Mixed Reality Classroom of Simulated Students}, author = {Aleshia Hayes and Stacey Hardin and Charles Hughes}, editor = {Randall Shumaker}, doi = {10.1007/978-3-642-39420-1_16}, isbn = {978-3-642-39420-1}, year = {2013}, date = {2013-01-01}, booktitle = {Virtual, Augmented and Mixed Reality. Systems and Applications: 5th International Conference, VAMR 2013. Part of HCI International 2013 (HCII2013), Las Vegas, NV, July 21-26}, volume = {8022}, pages = {142-151}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, series = {Lecture Notes in Computer Science}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Hughes2013, title = {Mixed Reality Space Travel for Physics Learning}, author = {Darin E. Hughes and Shabnam Sabbagh and Robb Lindgren and J. Michael Moshell and Charles Hughes}, editor = {Randall Shumaker}, doi = {10.1007/978-3-642-39420-1_18}, isbn = {978-3-642-39420-1}, year = {2013}, date = {2013-01-01}, booktitle = {Virtual, Augmented and Mixed Reality. Systems and Applications: 5th International Conference, VAMR 2013. Part of HCI International 2013 (HCII2013), Las Vegas, NV, July 21-26}, volume = {8022}, pages = {162-169}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, series = {Lecture Notes in Computer Science}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Walters2013, title = {ChronoLeap: The Great World’s Fair Adventure}, author = {Lori C. Walters and Darin E. Hughes and Manuel {Gertrudix Barrio} and Charles Hughes}, editor = {Randall Shumaker}, doi = {10.1007/978-3-642-39420-1_45}, isbn = {978-3-642-39420-1}, year = {2013}, date = {2013-01-01}, booktitle = {Virtual, Augmented and Mixed Reality. Systems and Applications: 5th International Conference, VAMR 2013. 
Part of HCI International 2013 (HCII2013), Las Vegas, NV, July 21-26}, volume = {8022}, pages = {426-435}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, series = {Lecture Notes in Computer Science}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{BS13, title = {2.5D Touch Interaction on Stereoscopic Tabletop Surfaces}, author = {Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BS13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of Interactive Surfaces for Interaction with Stereoscopic 3D (ISIS3D)}, pages = {1--4}, abstract = {Recent developments in touch and display technologies have laid the groundwork to combine touch-sensitive display systems with stereoscopic three-dimensional (3D) display. Traditionally, touch-sensitive surfaces capture only direct contacts such that the user has to penetrate a visually perceived object with negative parallax to touch the 2D surface behind the object. Conversely, recent technologies support capturing finger positions in front of the display, enabling users to interact with intangible objects in mid-air 3D space. In previous works we compared such 2D touch and 3D mid-air interactions in a Fitts' Law experiment for objects with varying stereoscopic parallax. The results showed that within a small range above the surface 2D interaction is beneficial, whereas for objects farther away 3D interaction is beneficial. For these reasons, we discuss the concept of 2.5D interaction for such setups and introduce corresponding widgets for interaction with stereoscopic touch displays by means of an example application.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{HBFKKKBS13, title = {Comparing 3D Interaction Performance in Comfortable and Uncomfortable Regions}, author = {Marina Hofmann and Ronja Bürger and Ninja Frost and Julia Karremann and Jule Keller-Bacher and Stefanie Kraft and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/HBFKKKBS13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {3--14}, abstract = {Immersive virtual environments (IVEs) have the potential to afford natural interaction in the three-dimensional (3D) space around a user. While the available physical workspace can differ between IVEs, only a small region is located within arm's reach at any given moment. This interaction space is solely defined by the shape and posture of the user's body. Interaction performance in this space depends on a variety of ergonomic factors, the user's endurance, muscular strength, as well as fitness. In this paper we investigate differences in selection task performance when users interact with their hands in a comfortable or uncomfortable region around their body. In a pilot study we identified comfortable and uncomfortable interaction regions for users who are standing upright. We conducted a Fitts' Law experiment to evaluate selection performance in these different regions over a duration of about thirty minutes. Although we could not find any significant differences in interaction performance between the two regions, we observed a trend that the extent of physical fitness of the users affects performance: Athletic users perform better than unathletic users.
We discuss implications for natural interaction in IVEs.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{ZBSL13, title = {Design and Evaluation of 3D GUI Widgets for Stereoscopic Touch-Displays}, author = {David Zilch and Gerd Bruder and Frank Steinicke and Frank Lamack}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/ZBSL13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {37--48}, abstract = {Recent developments in the area of interactive entertainment have suggested combining stereoscopic visualization with multi-touch displays, which has the potential to open up new vistas for natural interaction with interactive three-dimensional applications. However, the question arises how user interfaces for such setups should be designed in order to provide an effective user experience. In this paper we introduce 3D GUI widgets for interaction with stereoscopic touch displays. We have designed the widgets according to skeuomorph features and affordances. We evaluated the developed widgets in the scope of an example application in order to analyze the usability of and user behavior with this 3D user interface. The results reveal differences in user behavior with and without stereoscopic display during touch interaction, and show that the developed 3D GUI widgets can be used effectively in different applications.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BSS13, title = {Effects of Visual Conflicts on 3D Selection Task Performance in Stereoscopic Display Environments}, author = {Gerd Bruder and Frank Steinicke and Wolfgang Stürzlinger}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSS13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI)}, pages = {115--118}, abstract = {Mid-air direct-touch interaction in stereoscopic display environments poses challenges to the design of 3D user interfaces. Not only is passive haptic feedback usually absent when selecting a virtual object displayed with positive or negative parallax relative to a display surface, but such setups also suffer from inherent visual conflicts, such as vergence/accommodation mismatches and double vision. In particular, if the user tries to select a virtual object with a finger or input device, either the virtual object or the user's finger will appear blurred, resulting in an ambiguity for selections that may significantly impact the user's performance. In this paper we evaluate the effect of visual conflicts for mid-air 3D selection performance within arm's reach on a stereoscopic table with a Fitts' Law experiment. We compare three different techniques with different levels of visual conflicts for selecting a virtual object: real hand, virtual offset cursor, and virtual offset hand. Our results show that the error rate is highest for the real hand condition and lower for the virtual offset-based techniques. However, our results indicate that selections with the real hand resulted in the highest effective throughput of all conditions.
This suggests that virtual offset-based techniques do not improve overall performance.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{JDSBS13, title = {Evaluation der ber}, author = {Björn Janich and Monique Dittrich and Milena Schlosser and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/JDSBS13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {15--26}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BWBLS13, title = {Going With the Flow: Modifying Self-Motion Perception with Computer-Mediated Optic Flow}, author = { Gerd Bruder and Phil Wieland and Benjamin Bolte and Markus Lappe and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BWBLS13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the International Symposium on Mixed and Augmented Reality (ISMAR)}, pages = {67--74}, abstract = {One major benefit of wearable computers is that users can naturally move and explore computer-mediated realities. However, researchers often observe that users' space and motion perception severely differ in such environments compared to the real world, an effect that is often attributed to slight discrepancies in sensory cues, for instance, caused by tracking inaccuracy or system latency. This is particularly true for virtual reality (VR), but such conflicts are also inherent to augmented reality (AR) technologies. Although head-worn displays will become more and more available soon, the effects on motion perception have rarely been studied, and techniques to modify self-motion in AR environments have not been leveraged so far. In this paper we introduce the concept of computer-mediated optic flow, and analyze its effects on self-motion perception in AR environments. First, we introduce different techniques to modify optic flow patterns and velocity. We present a psychophysical experiment which reveals differences in self-motion perception with a video see-through head-worn display compared to the real-world viewing condition. We show that computer-mediated optic flow has the potential to make a user perceive self-motion as faster or slower than it actually is, and we discuss its potential for future AR setups.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BBS13, title = {Immersive Guided Tours for Virtual Tourism through 3D City Models}, author = { Rüdiger Beimler and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BBS13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {69--75}, abstract = {For decades, computer-mediated realities such as virtual reality (VR) or augmented reality (AR) have been used to visualize and explore virtual city models. The inherent three-dimensional (3D) nature as well as our natural understanding of urban areas and city models makes them suitable for immersive or semi-immersive installations, which support natural exploration of such complex datasets. In this paper, we present a novel VR approach to leverage immersive guided virtual tours through 3D city models. To this end, we combine an immersive head-mounted display (HMD) setup, which is used by one or more tourists, with a touch-enabled tabletop, which is used by the guide.
While the guide overviews the entire virtual 3D city model and the virtual representations of each tourist inside the model, tourists perceive an immersive view from an egocentric perspective to regions of the city model, which can be pointed out by the guide. We describe the implementation of the setup and discuss interactive virtual tours through a 3D city model.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BS13a, title = {Implementing Walking in Virtual Environments}, author = { Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BS13a-optimized.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Human Walking in Virtual Environments: Perception, Technology, and Applications}, pages = {221--240}, publisher = {Springer}, abstract = {In the previous chapter, locomotion devices have been described, which prevent displacements in the real world while a user is walking. In this chapter we explain different strategies, which allow users to actually move through the real world, while these physical displacements are mapped to motions of the camera in the virtual environment (VE) in order to support unlimited omnidirectional walking. Transferring a user's head movements from a physical workspace to a virtual scene is an essential component of any immersive VE. This chapter describes the pipeline of transformations from tracked real-world coordinates to coordinates of the VE. The chapter starts with an overview of different approaches for virtual walking, and gives an introduction to tracking volumes, coordinate systems and transformations required to set up a workspace for implementing virtual walking. The chapter continues with the traditional isometric mapping found in most immersive VEs, with special emphasis on combining walking in a restricted interaction volume via reference coordinates with virtual traveling metaphors (e.g., flying). Advanced mappings are then introduced with user-centric coordinates, which provide a basis to guide users on different paths in the physical workspace than what they experience in the virtual world.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{CASB13, title = {[POSTER] Touch & Move: A Portable Stereoscopic Multi-Touch Table}, author = { David Cyborra and Moritz Albert and Frank Steinicke and Gerd Bruder}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/CASB13.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {97--98}, abstract = {Recent developments in the fields of display technology provide new possibilities for engaging users in interactive exploration of three-dimensional (3D) virtual environments (VEs). Tracking technologies such as the Microsoft Kinect and emerging multi-touch interfaces enable inexpensive and low-maintenance interactive setups while providing portable solutions for engaging presentations and exhibitions. In this poster we describe an extension of the smARTbox, which is a responsive touch-enabled stereoscopic out-of-the-box technology for interactive setups. We extended the smARTbox by making the entire setup portable, which provides a new interaction experience when exploring 3D data sets. The portable tracked multi-touch interface supports two different interaction paradigms: exploration by multi-touch gestures as well as exploration by lateral movements of the entire setup.
Hence, typical gestures supporting rotation and panning can be implemented via multi-touch gestures, but also via actual movements of the setup.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BBS13a, title = {SmurVEbox: A Smart Multi-User Real-Time Virtual Environment for Generating Character Animations}, author = { Rüdiger Beimler and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BBS13a.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)}, pages = {1--7}, abstract = {Animating virtual characters is a complex task, which requires professional animators and performers, expensive motion capture systems, or considerable amounts of time to generate convincing results. In this paper we introduce the SmurVEbox, which is a cost-effective animation system that encompasses many important aspects of animating virtual characters by providing a novel shared user experience. SmurVEbox is a collaborative environment for generating character animations in real time, which has the potential to enhance the computer animation process. Our setup allows animators and performers to cooperate on the same virtual animation sequence in real time. Performers are able to communicate with the animator in the real space while simultaneously perceiving the effects of their actions on the virtual character in the virtual space. The animator can refine actions of a performer in real time so that both collaborate on the same animation of a virtual character. We describe the setup and present a simple application.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BSS13a, title = {To Touch or not to Touch? Comparing 2D Touch and 3D Mid-Air Interaction on Stereoscopic Tabletop Surfaces}, author = { Gerd Bruder and Frank Steinicke and Wolfgang Stürzlinger}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSS13a-optimized.pdf}, year = {2013}, date = {2013-01-01}, booktitle = {Proceedings of the ACM Symposium on Spatial User Interaction (SUI)}, pages = {1--8}, abstract = {Recent developments in touch and display technologies have laid the groundwork to combine touch-sensitive display systems with stereoscopic three-dimensional (3D) display. Although this combination provides a compelling user experience, interaction with objects stereoscopically displayed in front of the screen poses some fundamental challenges: Traditionally, touch-sensitive surfaces capture only direct contacts such that the user has to penetrate the visually perceived object to touch the 2D surface behind the object. Conversely, recent technologies support capturing finger positions in front of the display, enabling users to interact with intangible objects in mid-air 3D space. In this paper we perform a comparison between such 2D touch and 3D mid-air interactions in a Fitts' Law experiment for objects with varying stereoscopic parallax. The results show that the 2D touch technique is more efficient close to the screen, whereas for targets further away from the screen, 3D selection outperforms 2D touch.
Based on the results, we present implications for the design and development of future touch-sensitive interfaces for stereoscopic displays.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{BSS13b, title = {Touching the Void Revisited: Analyses of Touch Behavior On and Above Tabletop Surfaces}, author = { Gerd Bruder and Frank Steinicke and Wolfgang Stürzlinger}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSS13b.pdf}, year = {2013}, date = {2013-01-01}, journal = {Lecture Notes in Computer Science: Human-Computer Interaction - INTERACT 2013}, volume = {8117}, pages = {278--296}, abstract = {Recent developments in touch and display technologies made it possible to integrate touch-sensitive surfaces into stereoscopic three-dimensional (3D) displays. Although this combination provides a compelling user experience, interaction with stereoscopically displayed objects poses some fundamental challenges. If a user aims to select a 3D object, each eye sees a different perspective of the same scene. This results in two distinct projections on the display surface, which raises the question where users would touch in 3D or on the two-dimensional (2D) surface to indicate the selection. In this paper we analyze the relation between the 3D positions of stereoscopically displayed objects and the on- as well as off-surface touch areas. The results show that 2D touch interaction works better close to the screen but also that 3D interaction is more suitable beyond 10 cm from the screen. Finally, we discuss implications for the development of future touch-sensitive interfaces with stereoscopic display.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @article{SB13, title = {Using Perceptual Illusions for Redirected Walking}, author = { Frank Steinicke and Gerd Bruder}, year = {2013}, date = {2013-01-01}, journal = {IEEE Computer Graphics and Applications}, volume = {33}, number = {1}, pages = {6--11}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @article{Roberts2013aa, title = {Testing and Evaluation of a Wearable Augmented Reality System for Natural Outdoor Environments}, author = { David Roberts and Alberico Menozzi and James Cook and Todd Sherrill and Stephen Snarski and Pat Russler and Brian Clipp and Robert Karl and Eric Wenger and Matthew Bennett and Jennifer Mauger and William Church and Herman Towles and Stephen MacCabe and Jeffrey Webb and Jasper Lupo and Jan-Michael Frahm and Enrique Dunn and Christopher Leslie and Greg Welch}, url = {http://dx.doi.org/10.1117/12.2015621 https://sreal.ucf.edu/wp-content/uploads/2017/02/Roberts2013aa.pdf}, doi = {10.1117/12.2015621}, year = {2013}, date = {2013-01-01}, journal = {Proc. SPIE}, volume = {8735}, pages = {87350A-87350A-16}, abstract = {This paper describes performance evaluation of a wearable augmented reality system for natural outdoor environments. Applied Research Associates (ARA), as prime integrator on the DARPA ULTRA-Vis (Urban Leader Tactical, Response, Awareness, and Visualization) program, is developing a soldier-worn system to provide intuitive ‘heads-up’ visualization of tactically-relevant geo-registered icons. Our system combines a novel pose estimation capability, a helmet-mounted see-through display, and a wearable processing unit to accurately overlay geo-registered iconography (e.g., navigation waypoints, sensor points of interest, blue forces, aircraft) on the soldier’s view of reality.
We achieve accurate pose estimation through fusion of inertial, magnetic, GPS, terrain data, and computer-vision inputs. We leverage a helmet-mounted camera and custom computer vision algorithms to provide terrain-based measurements of absolute orientation (i.e., orientation of the helmet with respect to the earth). These orientation measurements, which leverage mountainous terrain horizon geometry and mission planning landmarks, enable our system to operate robustly in the presence of external and body-worn magnetic disturbances. Current field testing activities across a variety of mountainous environments indicate that we can achieve high icon geo-registration accuracy (<10mrad) using these vision-based methods.}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Pillat2012, title = {Compliance estimation during bilateral teleoperation of a robotic arm}, author = {Remo Pillat and Arjun Nagendran}, doi = {10.1109/ROBIO.2012.6491194}, year = {2012}, date = {2012-12-11}, booktitle = {2012 IEEE International Conference on Robotics and Biomimetics (ROBIO)}, pages = {1591-1597}, keywords = {SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Pillat2012b, title = {A Mixed Reality System for Teaching STEM Content Using Embodied Learning and Whole-body Metaphors}, author = {Remo Pillat and Arjun Nagendran and Robb Lindgren}, doi = {10.1145/2407516.2407584}, isbn = {978-1-4503-1825-9}, year = {2012}, date = {2012-12-02}, booktitle = {Proceedings of the 11th ACM SIGGRAPH International Conference on Virtual-Reality Continuum and Its Applications in Industry}, pages = {295-302}, publisher = {ACM}, address = {New York, NY, USA}, series = {VRCAI '12}, keywords = {SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Nagendran2012aa, title = {Continuum of Virtual-Human Space: Towards Improved Interaction Strategies for Physical-Virtual Avatars}, author = {Arjun Nagendran and Remo Pillat and Charles Hughes and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Nagendran2012aa.pdf}, year = {2012}, date = {2012-12-01}, booktitle = {Proceedings of 11th ACM SIGGRAPH International Conference on Virtual-Reality Continuum and its Applications in Industry (VRCAI 2012),}, publisher = {IEEE}, keywords = {A-ceh, A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Zheng2012aa, title = {[POSTER] A General Approach for Closed-Loop Registration in AR}, author = {Feng Zheng and Ryan Schubert and Greg Welch}, year = {2012}, date = {2012-11-01}, booktitle = {ISMAR '12: Proceedings of the Eleventh IEEE International Symposium on Mixed and Augmented Reality (ISMAR'12)}, address = {Atlanta, GA, USA}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Schubert2012aa, title = {Advances in Shader Lamps Avatars for Telepresence}, author = {Ryan Schubert and Greg Welch and Peter Lincoln and Arjun Nagendran and Remo Pillat and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Schubert2012aa.pdf}, year = {2012}, date = {2012-10-01}, booktitle = {Proceedings of 3DTV-Conference 2012: The True Vision: Capture, Transmission and Display of 3D Video}, address = {ETH Zurich, Zurich, Switzerland}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Varcholik2012, title = {Establishing a baseline for text entry for a multi-touch virtual keyboard}, author = {Paul Varcholik and Joseph LaViola Jr. 
and Charles Hughes}, url = {http://dx.doi.org/10.1016/j.ijhcs.2012.05.007}, doi = {10.1016/j.ijhcs.2012.05.007}, year = {2012}, date = {2012-10-01}, journal = {International Journal of Human-Computer Studies}, volume = {70}, number = {10}, pages = {657-672}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Zhou2012ab, title = {Local Sequential Ensemble Kalman Filter for Simultaneously Tracking States and Parameters}, author = {Ning Zhou and Zhenyu Huang and Yulan Li and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhou2012ab.pdf}, year = {2012}, date = {2012-09-01}, booktitle = {Proceedings of 2012 North American Power Symposium}, address = {Urbana-Champaign, IL, USA}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Martin2012, title = {Analysis of a Procedural System for Automatic Scenario Generation}, author = {Glenn A. Martin and Charles Hughes and J. Michael Moshell}, url = {http://www.crcnetbase.com/doi/abs/10.1201/b12319-64}, isbn = {978-1-4398-7031-0}, year = {2012}, date = {2012-07-27}, urldate = {2016-12-19}, booktitle = {Advances in Applied Human Modeling and Simulation}, pages = {536–544}, publisher = {CRC Press 2012}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Lopez2012, title = {Cross Cultural Training through Digital Puppetry}, author = {Angel Lopez and Charles Hughes and Daniel Mapes and Lisa Dieker}, url = {http://www.crcnetbase.com/doi/abs/10.1201/b12316-30}, isbn = {978-1-4398-7028-0}, year = {2012}, date = {2012-07-21}, urldate = {2016-12-19}, booktitle = {Advances in Design for Cross-Cultural Activities Part I}, pages = {247–256}, publisher = {CRC Press 2012}, chapter = {25}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @misc{Welch2012ab, title = {Haptic Communication is Increasingly Used in Interrogations}, author = {Greg Welch}, year = {2012}, date = {2012-07-01}, howpublished = {Personal communication with Fran Berrios, Department of Homeland Security, FLETC}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {misc} } @inproceedings{Hughes2012, title = {Mediated Dialogues through Multiple Networked Avatars}, author = {Charles Hughes and Daniel Mapes}, year = {2012}, date = {2012-06-14}, booktitle = {Proceedings of Immersive Education 2012}, address = {Boston, MA}, series = {(iED 2012)}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Hughes2012b, title = {Interaction Metaphors for Driving STEM Education Game Development}, author = {Darin E. Hughes and Lori C.
Walters and Robb Lindgren}, year = {2012}, date = {2012-06-12}, booktitle = {Proceedings of Immersive Education 2012}, address = {Boston, MA}, series = {(iED 2012)}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Richardson2012, title = {Experimental Tests of ‘Bidi-bot’: A Mechanism Designed for Clearing Loose Debris from the Path of Mobile Search and Rescue Robots}, author = {Robert Richardson and Arjun Nagendran and Robin Scott}, url = {http://dx.doi.org/10.1080/01691864.2012.685235}, doi = {10.1080/01691864.2012.685235}, year = {2012}, date = {2012-05-23}, journal = {Advanced Robotics}, volume = {26}, number = {15}, pages = {1799-1823}, keywords = {SREAL}, pubstate = {published}, tppubtype = {article} } @article{Zhou2012aa, title = {Identifying the Optimal Measurement Subspace for the Ensemble Kalman Filter}, author = {Ning Zhou and Zhenyu Huang and Greg Welch and Jinghe Zhang}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhou2012aa.pdf}, year = {2012}, date = {2012-05-01}, journal = {IET Electronics Letters}, volume = {48}, number = {11}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Braeger2012, title = {Linear Compression for Spatially-Varying BRDFs}, author = {Steven Braeger and Charles Hughes}, url = {http://doi.acm.org/10.1145/2159616.2159658}, doi = {10.1145/2159616.2159658}, isbn = {978-1-4503-1194-6}, year = {2012}, date = {2012-03-09}, booktitle = {Proceedings of the ACM SIGGRAPH Symposium on Interactive 3D Graphics and Games, Costa Mesa, California, March 9-11}, pages = {212}, publisher = {ACM}, address = {New York, NY, USA}, series = {I3D '12}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Pillat2012b, title = {A Control Paradigm for Decoupled Operation of Mobile Robots in Remote Environment}, author = {Remo Pillat and Arjun Nagendran and Charles Hughes}, doi = {10.5220/0003947205530561}, isbn = {978-989-8565-02-0}, year = {2012}, date = {2012-02-24}, booktitle = {Proceedings of the International Conference on Computer Graphics Theory and Applications and International Conference on Information Visualization Theory and Applications (VISIGRAPP 2012)}, pages = {553-561}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {conference} } @article{Nagendran2012, title = {Biologically inspired legs for UAV perched landing}, author = {Arjun Nagendran and William Crowther and Robert Richardson}, doi = {10.1109/MAES.2012.6163608}, issn = {0885-8985}, year = {2012}, date = {2012-02-01}, journal = {IEEE Aerospace and Electronic Systems Magazine}, volume = {27}, number = {2}, pages = {4-13}, keywords = {}, pubstate = {published}, tppubtype = {article} } @incollection{Rivera-Gutierrez2012aa, title = {Shader Lamps Virtual Patients: the Physical Representation of Virtual Patients}, author = {Diego Rivera-Gutierrez and Greg Welch and Peter Lincoln and Mary Whitton and Juan Cendan and David A.
Chesnutt and Henry Fuchs and Benjamin Lok}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Rivera-Gutierrez2012aa.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Studies in Health Technology and Informatics, Volume 173: Medicine Meets Virtual Reality 19}, pages = {372--378}, publisher = {IOS Press}, series = {Studies in Health Technology and Informatics}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {incollection} } @unpublished{Welch2012aa, title = {Physical Manifestations of Virtual Patients}, author = {Greg Welch and Diego Rivera-Gutierrez and Peter Lincoln and Mary Whitton and Juan Cendan and David A. Chesnutt and Henry Fuchs and Benjamin Lok and Rick Skarbez}, year = {2012}, date = {2012-01-01}, note = {Poster presented at the 12th International Meeting on Simulation in Healthcare (IMSH 2011)}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {unpublished} } @inproceedings{Welch2012ac, title = {Physical-Virtual Humans: Challenges and Opportunities}, author = { Greg Welch}, url = {http://ieeexplore.ieee.org/stamp/stamp.jsp?arnumber=6296798 http://dx.doi.org/10.1109/ISUVR.2012.13 https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2012ac.pdf}, doi = {10.1109/ISUVR.2012.13}, isbn = {978-0-7695-4766-4}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the 2012 International Symposium on Ubiquitous Virtual Reality}, pages = {10--13}, publisher = {IEEE Computer Society}, address = {Washington, DC, USA}, series = {ISUVR '12}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Patel2012, title = {Revolutionizing Human-computer Interfaces: The Auditory Perspective}, author = {Neel S. Patel and Darin E. Hughes}, url = {http://doi.acm.org/10.1145/2065327.2065336}, doi = {10.1145/2065327.2065336}, issn = {1072-5520}, year = {2012}, date = {2012-01-01}, journal = {ACM Interactions}, volume = {19}, number = {1}, pages = {34--37}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{FTCSWBPLS12, title = {A Mixed Reality Space for Tangible User Interaction}, author = {Martin Fischbach and Christian Treffs and David Cyborra and Alexander Strehler and Thomas Wedler and Gerd Bruder and Andreas Pusch and Marc E. Latoschik and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/FTCSWBPLS12.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {25--36}, abstract = {Recent developments in the field of semi-immersive display technologies provide new possibilities for engaging users in interactive three-dimensional virtual environments (VEs). For instance, combining low-cost tracking systems (such as the Microsoft Kinect) and multi-touch interfaces enables inexpensive and easily maintainable interactive setups. The goal of this work is to bring together virtual as well as real objects on a stereoscopic multi-touch enabled tabletop surface. Therefore, we present a prototypical implementation of such a mixed reality (MR) space for tangible interaction by extending the smARTbox. The smARTbox is a responsive touch-enabled stereoscopic out-of-the-box system that is able to track users and objects above as well as on the surface. 
We describe the prototypical hardware and software setup that extends this system to an MR space, and highlight design challenges for several application examples.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBSKB12, title = {A Taxonomy for Deploying Redirection Techniques in Immersive Virtual Environments}, author = {Evan A. Suma and Gerd Bruder and Frank Steinicke and David M. Krum and Mark Bolas}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {43--46}, abstract = {Natural walking can provide a compelling experience in immersive virtual environments, but it remains an implementation challenge due to the physical space constraints imposed on the size of the virtual world. The use of redirection techniques is a promising approach that relaxes the space requirements of natural walking by manipulating the user’s route in the virtual environment, causing the real world path to remain within the boundaries of the physical workspace. In this paper, we present and apply a novel taxonomy that separates redirection techniques according to their geometric flexibility versus the likelihood that they will be noticed by users. Additionally, we conducted a user study of three reorientation techniques, which confirmed that participants were less likely to experience a break in presence when reoriented using the techniques classified as subtle in our taxonomy. Our results also suggest that reorientation with change blindness illusions may give the impression of exploring a more expansive environment than continuous rotation techniques, but at the cost of negatively impacting spatial knowledge acquisition.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BPS12, title = {Analyzing Effects of Geometric Rendering Parameters on Size and Distance Estimation in On-Axis Stereographics}, author = {Gerd Bruder and Andreas Pusch and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BPS12.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the ACM Symposium on Applied Perception (SAP)}, pages = {111--118}, abstract = {Accurate perception of size and distance in a three-dimensional virtual environment is important for many applications. However, several experiments have revealed that spatial perception of virtual environments often deviates from the real world, even when the virtual scene is modeled as an accurate replica of a familiar physical environment. While previous research has elucidated various factors that can facilitate perceptual shifts, the effects of geometric rendering parameters on spatial cues are still not well understood. In this paper, we model and evaluate effects of spatial transformations caused by variations of the geometric field of view and the interpupillary distance in on-axis stereographic display environments. We evaluated different predictions in a psychophysical experiment in which subjects were asked to judge distance and size properties of virtual objects placed in a realistic virtual scene.
Our results suggest that variations in the geometric field of view have a strong influence on distance judgments, whereas variations in the geometric interpupillary distance mainly affect size judgments.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{FWLBS12, title = {Blending Real and Virtual Worlds using Self-Reflection and Fiducials}, author = {Martin Fischbach and Dennis Wiebusch and Marc E. Latoschik and Gerd Bruder and Frank Steinicke}, year = {2012}, date = {2012-01-01}, journal = {Entertainment Computing - ICEC 2012, Lecture Notes in Computer Science (LNCS)}, volume = {7522}, pages = {465--468}, abstract = {This paper presents an enhanced version of a portable out-of-the-box platform for semi-immersive interactive applications. The enhanced version combines stereoscopic visualization, marker-less user tracking, and multi-touch with self-reflection of users and tangible object interaction. A virtual fish tank simulation demonstrates how real and virtual worlds are seamlessly blended by providing a multi-modal interaction experience that utilizes a user-centric projection, body, and object tracking, as well as a consistent integration of physical and virtual properties like appearance and causality into a mixed real/virtual world.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{WBBS12, title = {Evaluation of Field of View Calibration Techniques for Head-mounted Displays and Effects on Distance Estimation}, author = {Carolin Walter and Gerd Bruder and Benjamin Bolte and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/WBBS12.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {13--24}, abstract = {Users in immersive virtual environments (VEs) with head-mounted displays (HMDs) often perceive compressed egocentric distances compared to the real world. Although various factors have been identified that affect egocentric distance perception, the main factors for this effect still remain unknown. Recent experiments suggest that miscalibration of the field of view (FOV) has a strong effect on distance perception. Unfortunately, it is not trivial to correctly set the FOV for a given HMD in such a way that the scene is rendered without mini- or magnification. In this paper we test two calibration techniques based on visual or visual-proprioceptive estimation tasks to determine the FOV of an immersive HMD and analyze the effect of the resulting FOVs on distance estimation in two experiments: (i) blind walking for long distances and (ii) blind grasping for arm-reach distances. We found an impact of the FOV on distance judgments, but calibrating the FOVs was not sufficient to compensate for distance underestimation effects.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{WFSLBS12a, title = {Evaluation von Headtracking in interaktiven virtuellen Umgebungen auf Basis der Kinect}, author = {Dennis Wiebusch and Martin Fischbach and Alexander Strehler and Marc E.
Latoschik and Gerd Bruder and Frank Steinicke}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {189--200}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{BSBWFL12, title = {Exploiting Perceptual Limitations and Illusions to Support Walking through Virtual Environments in Confined Physical Spaces}, author = {Gerd Bruder and Frank Steinicke and Benjamin Bolte and Phil Wieland and Harald Frenz and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSBWFL12.pdf}, year = {2012}, date = {2012-01-01}, journal = {Elsevier Displays}, volume = {34}, number = {2}, pages = {132--141}, abstract = {Head-mounted displays (HMDs) allow users to immerse in a virtual environment (VE) in which the user's viewpoint can be changed according to the tracked movements in real space. Because the size of the virtual world often differs from the size of the tracked lab space, a straightforward implementation of omni-directional and unlimited walking is not generally possible. In this article we review and discuss a set of techniques that use known perceptual limitations and illusions to support seemingly natural walking through a large virtual environment in a confined lab space. The concept behind these techniques is called redirected walking. With redirected walking, users are guided unnoticeably on a physical path that differs from the path the user perceives in the virtual world by manipulating the transformations from real to virtual movements. For example, virtually rotating the view in the HMD to one side with every step causes the user to unknowingly compensate by walking a circular arc in the opposite direction, while having the illusion of walking on a straight trajectory. We describe a number of perceptual illusions that exploit perceptual limitations of motion detectors to manipulate the user's perception of the speed and direction of his motion. We describe how gains of locomotor speed, rotation, and curvature can gradually alter the physical trajectory without the users observing any discrepancy, and discuss studies that investigated perceptual thresholds for these manipulations. We discuss the potential of self-motion illusions to shift or widen the applicable ranges for gain manipulations and to compensate for over- or underestimations of speed or travel distance in VEs. Finally, we identify a number of key issues for future research on this topic.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @article{KBBRSLK12, title = {Geometric Calibration of Head-Mounted Displays and its Effects on Distance Estimation}, author = {Falko Kellner and Benjamin Bolte and Gerd Bruder and Ulrich Rautenberg and Frank Steinicke and Markus Lappe and Reinhard Koch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/KBBRSLK12.pdf}, year = {2012}, date = {2012-01-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, volume = {18}, number = {4}, pages = {589--596}, abstract = {Head-mounted displays (HMDs) allow users to observe virtual environments (VEs) from an egocentric perspective. However, several experiments have provided evidence that egocentric distances are perceived as compressed in VEs relative to the real world. Recent experiments suggest that the virtual view frustum set for rendering the VE has an essential impact on the user's estimation of distances. 
In this article we analyze whether distance estimation can be improved by calibrating the view frustum for a given HMD and user. Unfortunately, in an immersive virtual reality (VR) environment, a full per-user calibration is not trivial and manual per-user adjustment often leads to mini- or magnification of the scene. Therefore, we propose a novel per-user calibration approach with optical see-through displays commonly used in augmented reality (AR). This calibration takes advantage of a geometric scheme based on 2D point - 3D line correspondences, which can be used intuitively by inexperienced users and requires less than a minute to complete. The required user interaction is based on taking aim at a distant target marker with a close marker, which ensures non-planar measurements covering a large area of the interaction space while also reducing the number of required measurements to five. We found a tendency that a calibrated view frustum reduced the average distance underestimation of users in an immersive VR environment, but even the correctly calibrated view frustum could not entirely compensate for the distance underestimation effects.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{SBWS12, title = {[POSTER] Analysis of IR-based Virtual Reality Tracking Using Multiple Kinects}, author = {Srivishnu Satyavolu and Gerd Bruder and Pete Willemsen and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBWS12.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {149--150}, abstract = {This article presents an analysis of using multiple Microsoft Kinect sensors to track users in a VR system. This article focuses on using multiple Kinect sensors to track infrared points for use in virtual reality applications. Multiple Kinect sensors may serve as a low-cost and affordable means to track position information across a large lab space in applications where precise location tracking is not necessary. We present our findings and analysis of the tracking range of a Kinect sensor in situations in which multiple Kinects are present. Overall, the Kinect sensor works well for this application and in lieu of more expensive options, the Kinect sensors may be a viable option for very low-cost tracking in virtual reality applications.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{FPBIS12, title = {Redirected Steering for Virtual Self-Motion Control with a Motorized Electric Wheelchair}, author = {Loren P. Fiore and Lane Phillips and Gerd Bruder and Victoria Interrante and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/FPBIS12.pdf}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the Joint Virtual Reality Conference (JVRC)}, pages = {45--48}, abstract = {Redirection techniques have shown great potential for enabling users to travel in large-scale virtual environments while their physical movements have been limited to a much smaller laboratory space. Traditional redirection approaches introduce a subliminal discrepancy between real and virtual motions of the user by subtle manipulations, which are thus highly dependent on the user and on the virtual scene. In the worst case, such approaches may result in failure cases that have to be resolved by obvious interventions, e.g., when a user faces a physical obstacle and tries to move forward.
In this paper we introduce a remote steering method for redirection techniques that are used for physical transportation in an immersive virtual environment. We present a redirection controller for turning a legacy wheelchair device into a remote control vehicle. In a psychophysical experiment we analyze the automatic angular motion redirection with our proposed controller with respect to detectability of discrepancies between real and virtual motions. Finally, we discuss this redirection method with its novel affordances for virtual traveling.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{BIPS12, title = {Redirecting Walking and Driving for Natural Navigation in Immersive Virtual Environments}, author = {Gerd Bruder and Victoria Interrante and Lane Phillips and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BIPS12-optimized.pdf}, year = {2012}, date = {2012-01-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, volume = {18}, number = {4}, pages = {538--545}, abstract = {Walking is the most natural form of locomotion for humans, and real walking interfaces have demonstrated their benefits for several navigation tasks. With recently proposed redirection techniques it becomes possible to overcome space limitations as imposed by tracking sensors or laboratory setups, and, theoretically, it is now possible to walk through arbitrarily large virtual environments. However, walking as the sole locomotion technique has drawbacks, in particular, for long distances, such that even in the real world we tend to support walking with passive or active transportation for longer-distance travel. In this article we show that concepts from the field of redirected walking can be applied to movements with transportation devices. We conducted psychophysical experiments to determine perceptual detection thresholds for redirected driving, and set these in relation to results from redirected walking. We show that redirected walking-and-driving approaches can easily be realized in immersive virtual reality laboratories, e.g., with electric wheelchairs, and show that such systems can combine advantages of real walking in confined spaces with benefits of using vehicle-based self-motion for longer-distance travel.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{FWLBS12a, title = {smARTbox: A Portable Setup for Intelligent Interactive Applications}, author = { Martin Fischbach and Dennis Wiebusch and Marc E. Latoschik and Gerd Bruder and Frank Steinicke}, year = {2012}, date = {2012-01-01}, booktitle = {Mensch & Computer 2012 - Workshopband: interaktiv informiert - allgegenwärtig und allumfassend!?}, pages = {521--524}, abstract = {This paper presents a semi-immersive, multimodal fish tank simulation realized using the smARTbox, an out-of-the-box platform for intelligent interactive applications. The smARTbox provides portability, stereoscopic visualization, marker-less user tracking and direct interscopic touch input. Off-the-shelf hardware is combined with a state-of-the-art simulation platform to provide a powerful system environment. The environment combines direct (touch) and indirect (movement) interaction.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{FLBS12, title = {smARTbox: Out-of-the-box Technologies for Interactive Art and Exhibition}, author = {Martin Fischbach and Marc E.
Latoschik and Gerd Bruder and Frank Steinicke}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)}, pages = {1--7}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{BSWL12, title = {Tuning Self-Motion Perception in Virtual Reality with Visual Illusions}, author = {Gerd Bruder and Frank Steinicke and Phil Wieland and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSWL12.pdf}, year = {2012}, date = {2012-01-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, volume = {18}, number = {7}, pages = {1068--1078}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{SB12, title = {Visual Perception of Perspective Distortions}, author = {Frank Steinicke and Gerd Bruder}, year = {2012}, date = {2012-01-01}, booktitle = {Proceedings of the IEEE Virtual Reality Workshop on Perceptual Illusions in Virtual Environments (PIVE)}, pages = {37--40}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{Welch2011ac, title = {Physical Manifestations of Virtual Patients}, author = {Greg Welch and Diego Rivera-Gutierrez and Peter Lincoln and Mary Whitton and Juan Cendan and David A. Chesnutt and Henry Fuchs and Benjamin Lok and Rick Skarbez}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2011ac.pdf}, year = {2011}, date = {2011-12-01}, journal = {Simulation in Healthcare}, volume = {6}, number = {6}, pages = {488}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Nagendran2011b, title = {Dynamic capture of free-moving objects}, author = {Arjun Nagendran and William Crowther and Robert Richardson}, doi = {10.1177/0959651811407659}, issn = {0959-6518}, year = {2011}, date = {2011-12-01}, journal = {Institution of Mechanical Engineers. Proceedings. Part I: Journal of Systems and Control Engineering}, volume = {225}, number = {8}, pages = {1054-1067}, publisher = {SAGE Publications Ltd}, keywords = {SREAL}, pubstate = {published}, tppubtype = {article} } @article{Walters2011, title = {Interconnections: Revisiting the Future}, author = {Lori C. Walters and Darin E.
Hughes and Charles Hughes}, doi = {10.1177/1555412011431360 }, year = {2011}, date = {2011-11-01}, journal = {Games and Culture}, volume = {6}, number = {6}, pages = {538-559}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @unpublished{Zhang2011ad, title = {Adaptive Kalman Filtering for Robust Power system State Tracking}, author = {Jinghe Zhang and Greg Welch and Gary Bishop and Zhenyu Huang}, year = {2011}, date = {2011-10-01}, note = {Poster presented at the DOE Applied Mathematics Program meeting}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {unpublished} } @article{Richardson2011, title = {The sweep-extend mechanism: A 10-bar mechanism to perform biologically inspired burrowing motions}, author = {Robert Richardson and Arjun Nagendran and Robin Scott}, url = {http://dx.doi.org/10.1016/j.mechatronics.2011.03.002}, doi = {10.1016/j.mechatronics.2011.03.002}, year = {2011}, date = {2011-09-01}, journal = {Mechatronics}, volume = {21}, number = {6}, pages = {939-950}, keywords = {SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{IIie2011aa, title = {On-line control of active camera networks for computer vision tasks}, author = {Adrian Ilie and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Ilie2011aa.pdf}, doi = {10.1109/ICDSC.2011.6042926}, year = {2011}, date = {2011-08-01}, booktitle = {2011 Fifth ACM/IEEE International Conference on Distributed Smart Cameras}, pages = {1-6}, note = {Awarded 2nd Prize Best ICDSC 2011 Paper.}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Zhu2011, title = {Why can’t a virtual character be more like a human: A mixed initiative approach to believable agents}, author = {Jichen Zhu and J. 
Michael Moshell and Santiago Ontañón and Elena Erbiceanu and Charles Hughes}, editor = {Randall Shumaker}, doi = {10.1007/978-3-642-22024-1_32}, year = {2011}, date = {2011-07-25}, booktitle = {Virtual and Mixed Reality - Systems and Applications }, issuetitle = {International Conference, Virtual and Mixed Reality 2011, Held as Part of HCI International 2011, Orlando, FL, USA, July 9-14, 2011, Proceedings, Part II}, volume = {6774}, pages = {289-296}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science }, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{Zhang2011ac, title = {LoDiM: A Novel Power System State Estimation Method with Dynamic Measurement Selection}, author = {Jinghe Zhang and Greg Welch and Gary Bishop}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhang2011ac.pdf}, year = {2011}, date = {2011-07-01}, booktitle = {Proceedings of 2011 IEEE Power & Energy Society General Meeting}, address = {Detroit, MI, USA}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Zhang2011aa, title = {Reduced Measurement-space Dynamic State Estimation (ReMeDySE) for Power Systems}, author = {Jinghe Zhang and Greg Welch and Gary Bishop and Zhenyu Huang}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhang2011aa.pdf}, year = {2011}, date = {2011-06-01}, booktitle = {PowerTech, 2011 IEEE Trondheim}, address = {Trondheim, Norway}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Zhang2011ab, title = {Power System State Estimation with Dynamic Optimal Measurement Selection}, author = {Jinghe Zhang and Greg Welch and Gary Bishop}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhang2011ab.pdf}, year = {2011}, date = {2011-04-01}, booktitle = {Proceedings of 2011 IEEE Symposium on Computational Intelligence Applications in Smart Grid}, address = {Paris, France}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Lincoln2011aa, title = {Continual Surface-Based Multi-Projector Blending for Moving Objects}, author = {Peter Lincoln and Greg Welch and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Lincoln2011aa.pdf}, year = {2011}, date = {2011-03-01}, journal = {Proceedings of IEEE Virtual Reality Conference (VR), 2011}, pages = {115--118}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {article} } @incollection{Welch2011aa, title = {Remote 3D Medical Consultation}, author = {Gregory Welch and Diane Sonnenwald and Henry Fuchs and Bruce Cairns and Ketan Mayer-Patel and Ruigang Yang and Andrei State and Herman Towles and Adrian Ilie and Srinivas Krishnan and Hanna Söderholm}, editor = {Sabine Coquillart and Guido Brunnett and Gregory Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/11/Welch2011aa-1.pdf}, isbn = {978-3-211-99177-0}, year = {2011}, date = {2011-01-01}, booktitle = {Virtual Realities: Dagstuhl Seminar 2008}, pages = {139--159}, publisher = {Springer}, chapter = {8}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {incollection} } @book{Brunnett2011, title = {Virtual Realities: Dagstuhl Seminar 2008}, editor = {Guido Brunnett and Sabine Coquillart and Gregory Welch}, doi = {10.1007/978-3-211-99178-7}, year = {2011}, date = {2011-01-01}, publisher = {Springer-Verlag Wien}, edition = {1st Edition}, keywords = {A-gfw, SREAL}, pubstate = {published}, tppubtype = {book} } @article{Nowrouzezahrai2011, title = 
{A Programmable System for Artistic Volumetric Lighting}, author = { Derek Nowrouzezahrai and Jared Johnson and Andrew Selle and Dylan Lacewell and Michael Kaschalk and Wojciech Jarosz}, url = {http://doi.acm.org/10.1145/2010324.1964924}, doi = {10.1145/2010324.1964924}, issn = {0730-0301}, year = {2011}, date = {2011-01-01}, journal = {ACM Transactions on Graphics}, volume = {30}, number = {4}, pages = {29:1--29:8}, publisher = {ACM}, address = {New York, NY, USA}, keywords = {SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{VSBH11, title = {2D Touching of 3D Stereoscopic Objects}, author = {Dimitar Valkov and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/VSBH11.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the ACM Conference on Human Factors in Computing Systems (CHI)}, pages = {1353--1362}, abstract = {Recent developments in the area of touch and display technologies have suggested combining multi-touch systems and stereoscopic visualization. Stereoscopic perception requires each eye to see a slightly different perspective of the same scene, which results in two distinct projections on the display. Thus, if the user wants to select a 3D stereoscopic object in such a setup, the question arises where she would touch the 2D surface to indicate the selection. A user may apply different strategies, for instance touching the midpoint between the two projections, or touching one of them. In this paper we analyze the relation between the 3D positions of stereoscopically rendered objects and the on-surface touch points, where users touch the surface. We performed an experiment in which we determined the positions of the users' touches for objects, which were displayed with positive, negative or zero parallaxes. We found that users tend to touch between the projections for the two eyes with an offset towards the projection for the dominant eye. Our results give implications for the development of future touch-enabled interfaces, which support 3D stereoscopic visualization.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Bru11, title = {Augmenting Geometric Fields of View and Scaling Head Rotations for Efficient Exploration in Head-Mounted Display Environments}, author = {Gerd Bruder}, abstract = {Physical characteristics and constraints of today’s head-mounted displays (HMDs) often impair interaction in immersive virtual environments (VEs). For instance, due to the limited field of view (FOV) subtended by the display units in front of the user’s eyes more effort is required to explore a VE by head rotations than for exploration in the real world. In this paper we propose a combination of two augmentation techniques that have the potential to make exploration of VEs more efficient: (1) augmenting the geometric FOV (GFOV) used for rendering the VE, and (2) amplifying head rotations while the user changes her head orientation. In order to identify how much manipulation can be applied without users noticing, we conducted two psychophysical experiments in which we analyzed subjects’ ability to discriminate between virtual and real head pitch and roll rotations while three different geometric FOVs were used. Our results show that the combination of both techniques has great potential to support efficient exploration of VEs.
We found that virtual pitch and roll rotations can be amplified by 30% and 44% respectively, when the GFOV matches the subject’s estimation of the most natural FOV. This leads to a possible reduction of the user’s effort required to explore the VE using a combination of both techniques by approximately 25%.}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the IEEE Virtual Reality Workshop on Perceptual Illusions in Virtual Environments (PIVE)}, pages = {9}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BBS11, title = {Impact of Visual Orientation Cues on Angular Motion Redirection}, author = {Benjamin Bolte and Gerd Bruder and Frank Steinicke}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the GI-Workshop VR/AR}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BBS11b, title = {Jumping through Immersive Video Games}, author = {Benjamin Bolte and Gerd Bruder and Frank Steinicke}, year = {2011}, date = {2011-01-01}, booktitle = {SIGGRAPH Asia 2011 Technical Sketches}, pages = {1}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @phdthesis{Bru11a, title = {Making Small Spaces Feel Large: Self-motion Perception, Redirection and Illusions}, author = {Gerd Bruder}, year = {2011}, date = {2011-01-01}, school = {Department of Computer Science, University of Münster}, keywords = {A-gb}, pubstate = {published}, tppubtype = {phdthesis} } @article{SBLKWH11, title = {Natural Perspective Projections for Head-Mounted Displays}, author = {Frank Steinicke and Gerd Bruder and Markus Lappe and Scott Kuhl and Pete Willemsen and Klaus H. Hinrichs}, year = {2011}, date = {2011-01-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, volume = {17}, number = {7}, pages = {888--899}, abstract = {The display units integrated in today's head-mounted displays (HMDs) provide only a limited field of view (FOV) to the virtual world. In order to present an undistorted view to the virtual environment (VE), the perspective projection used to render the VE has to be adjusted to the limitations caused by the HMD characteristics. In particular, the geometric field of view (GFOV), which defines the virtual aperture angle used for rendering of the 3D scene, is set up according to the display field of view (DFOV). A discrepancy between these two fields of view distorts the geometry of the VE in a way that either minifies or magnifies the imagery displayed to the user. It has been shown that this distortion has the potential to affect a user's perception of the virtual space, sense of presence, and performance on visual search tasks. In this paper, we analyze the user's perception of a VE displayed in a HMD, which is rendered with different GFOVs. We introduce a psychophysical calibration method to determine the HMD's actual field of view, which may vary from the nominal values specified by the manufacturer. Furthermore, we conducted two experiments to identify perspective projections for HMDs, which are identified as natural by subjects--even if these perspectives deviate from the perspectives that are inherently defined by the DFOV. In the first experiment, subjects had to adjust the GFOV for a rendered virtual laboratory such that their perception of the virtual replica matched the perception of the real laboratory, which they saw before the virtual one.
In the second experiment, we displayed the same virtual laboratory, but restricted the viewing condition in the real world to simulate the limited viewing condition in a HMD environment. We found that subjects evaluate a GFOV as natural when it is larger than the actual DFOV of the HMD--in some cases up to 50 percent--even when subjects viewed the real space with a limited field of view.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BS11a, title = {Perceptual Evaluation of Interpupillary Distances in Head-mounted Display Environments}, author = {Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BS11a.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the GI-Workshop VR/AR}, pages = {135--146}, abstract = {Head-mounted displays (HMDs) allow users to explore virtual environments (VEs) from an egocentric perspective. In order to present a realistic view, the rendering system has to be adjusted to the characteristics of the HMD, e.g., the display's field of view (FOV), as well as to characteristics that are unique for each user, in particular, the interpupillary distance (IPD). In the optimal case, the rendering system is calibrated to the binocular configuration of the HMD, and adapted to the measured IPD of the user. A discrepancy between the user's IPD and stereoscopic rendering may distort the perception of the VE, since objects may appear minified or magnified. In this paper, we describe binocular calibration of HMDs, and evaluate which IPDs are judged as most natural by HMD users. In our experiment, subjects had to adjust the IPD for a rendered virtual replica of our laboratory until perception of the virtual replica matched perception of the real laboratory. Our results motivate that the IPDs which are estimated by subjects as most natural are affected by the FOV of the HMD, and the geometric FOV used for rendering. In particular, we found that with increasing fields of view, subjects tend to underestimate their geometric IPD.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BSWM11, title = {[POSTER] Evaluation of Field of View Calibration Techniques for Head-mounted Displays}, author = {Gerd Bruder and Frank Steinicke and Carolin Walter and Mathias Möhring}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSWM11.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {ACM Symposium on Applied Perception in Graphics and Visualization}, pages = {125}, abstract = {In this poster we present a comparison of two calibration techniques that allow to determine the field of view (FOV) for immersive head-mounted displays (HMDs).}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{SBK11, title = {Realistic Perspective Projections for Virtual Objects and Environments}, author = { Frank Steinicke and Gerd Bruder and Scott Kuhl}, year = {2011}, date = {2011-01-01}, journal = {ACM Transactions on Graphics (TOG)}, volume = {30}, number = {5}, pages = {112:1--112:10}, abstract = {Computer graphics systems provide sophisticated means to render virtual 3D space to 2D display surfaces by applying planar geometric projections. In a realistic viewing condition the perspective applied for rendering should appropriately account for the viewer's location relative to the image.
As a result, an observer would not be able to distinguish between a rendering of a virtual environment on a computer screen and a view "through" the screen at an identical real-world scene. Until now, little effort has been made to identify perspective projections which cause human observers to judge them to be realistic. In this article we analyze observers' awareness of perspective distortions of virtual scenes displayed on a computer screen. These distortions warp the virtual scene and make it differ significantly from how the scene would look in reality. We describe psychophysical experiments that explore the subject's ability to discriminate between different perspective projections and identify projections that most closely match an equivalent real scene. We found that the field of view used for perspective rendering should match the actual visual angle of the display to provide users with a realistic view. However, we found that slight changes of the field of view in the range of 10-20% for two classes of test environments did not cause a distorted mental image of the observed models.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BSW11, title = {Self-motion illusions in immersive virtual reality environments}, author = { Gerd Bruder and Frank Steinicke and Phil Wieland}, editor = {Michitaka Hirose and Benjamin Lok and Aditi Majumder and Dieter Schmalstieg}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSW11.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {39--46}, abstract = {Motion perception in immersive virtual reality environments significantly differs from the real world. For example, previous work has shown that users tend to underestimate travel distances in immersive virtual environments (VEs). As a solution to this problem, some researchers propose to scale the mapped virtual camera motion relative to the tracked real-world movement of a user until real and virtual motion appear to match, i.e., real-world movements could be mapped with a larger gain to the VE in order to compensate for the underestimation. Although this approach usually results in more accurate self-motion judgments by users, introducing discrepancies between real and virtual motion can become a problem, in particular, due to misalignments of both worlds and distorted space cognition. In this paper we describe a different approach that introduces apparent self-motion illusions by manipulating optic flow fields during movements in VEs. These manipulations can affect self-motion perception in VEs, but omit a quantitative discrepancy between real and virtual motions. We introduce four illusions and show in experiments that optic flow manipulation can significantly affect users' self-motion judgments. 
Furthermore, we show that with such manipulation of optic flow fields the underestimation of travel distances can be compensated.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BBS11a, title = {The Jumper Metaphor: An Effective Navigation Technique for Immersive Display Setups}, author = {Benjamin Bolte and Gerd Bruder and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BBS11a.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)}, pages = {1--7}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{VBBS11, title = {VIARGO: A Generic VR-based Interaction Library}, author = { Dimitar Valkov and Gerd Bruder and Benjamin Bolte and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/VBBS11.pdf}, year = {2011}, date = {2011-01-01}, booktitle = {Proceedings of the Workshop on Software Engineering and Architectures for Realtime Interactive Systems (SEARIS)}, pages = {23--28}, abstract = {Traditionally, interaction techniques for virtual reality applications are implemented in a proprietary way on specific target platforms, e. g., requiring specific hardware, physics or rendering libraries, which withholds reusability and portability. Though hardware abstraction layers for numerous devices are provided by multiple virtual reality libraries, they are usually tightly bound to a particular rendering environment. In this paper we introduce Viargo - a generic virtual reality interaction library, which serves as additional software layer that is independent from the application and its linked libraries, i. e., a once developed interaction technique, such as walking with a head-mounted display or multi-touch interaction, can be ported to different hard- or software environments with minimal code adaptation. We describe the underlying concepts and present examples on how to integrate Viargo in different graphics engines, thus extending proprietary graphics libraries with a few lines of code to easy-to-use virtual reality engines.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{Risi2010, title = {Evolving plastic neural networks with novelty search}, author = {Sebastian Risi and Charles Hughes and Kenneth O.
Stanley }, doi = {10.1177/1059712310379923}, year = {2010}, date = {2010-12-01}, journal = {Adaptive Behavior}, volume = {18}, number = {6}, pages = {470-491}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Zhang2010ab, title = {Optimal PMU Placement Evaluation for Power System Dynamic State Estimation}, author = {Jinghe Zhang and Greg Welch and Gary Bishop and Zhenyu Huang}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhang2010ab.pdf}, year = {2010}, date = {2010-10-01}, booktitle = {Proceedings of IEEE PES Conference on Innovative Smart Grid Technologies Europe (ISGT 2010)}, address = {Chalmers Lindholmen, Gothenburg, Sweden}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Zhang2010aa, title = {Observability and Estimation Uncertainty Analysis for PMU Placement Alternatives}, author = {Jinghe Zhang and Greg Welch and Gary Bishop}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Zhang2010aa.pdf}, year = {2010}, date = {2010-09-01}, booktitle = {Proceedings of the 2010 North American Power Symposium (NAPS 2010)}, address = {Arlington, TX, U.S.A.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Schultz2010, title = {The Rehabilitation of Shaken Soldier Syndrome: A Coordinated System of Community-Situated Postacute Treatment for Blast-Injured Veterans}, author = {Larry Shultz and Kenyatta Rivers and Elizabeth McNamara and Charles Hughes}, editor = {Dennis G. Stanton and Lawrence R. Castaneda}, isbn = {9781608761494}, year = {2010}, date = {2010-08-01}, booktitle = {Military Psychiatry: New Developments}, pages = {71-106}, publisher = {Nova Science Publishers}, chapter = {3}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inbook{Martin2010, title = {What is a Scenario? Operationalizing Training Scenarios for Automatic Generation}, author = {Glenn A. Martin and Sae Schatz and Charles Hughes and Denise Nicholson}, doi = {10.1201/EBK1439834916-c74}, isbn = {978-1-4398-3491-6}, year = {2010}, date = {2010-06-23}, booktitle = {Advances in Cognitive Ergonomics}, issuetitle = {Applied Human Factors and Ergonomics 2010 (AHFE2010), July 17-29}, pages = {746–753}, publisher = {CRC Press 2010}, address = {Miami, FL}, chapter = {74}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @article{Lincoln2010aa, title = {Animatronic shader lamps avatars}, author = {Peter Lincoln and Greg Welch and Andrew Nashel and Andrei State and Adrian Ilie and Henry Fuchs}, url = {http://dx.doi.org/10.1007/s10055-010-0175-5 https://sreal.ucf.edu/wp-content/uploads/2017/02/Lincoln2010aa.pdf}, doi = {10.1007/s10055-010-0175-5}, issn = {1359-4338}, year = {2010}, date = {2010-01-01}, journal = {Virtual Reality}, pages = {1-14}, publisher = {Springer London}, note = {10.1007/s10055-010-0175-5}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Charbonneau2010, title = {Vibraudio Pose: An Investigation of Non-visual Feedback Roles for Body Controlled Video Games}, author = { Emiko Charbonneau and Charles Hughes and Joseph J.
{LaViola Jr.} }, url = {http://doi.acm.org/10.1145/1836135.1836147}, doi = {10.1145/1836135.1836147}, isbn = {978-1-4503-0097-1}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the 5th ACM SIGGRAPH Symposium on Video Games}, pages = {79--84}, publisher = {ACM}, address = {Los Angeles, California}, series = {Sandbox '10}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Martin2010b, title = {The Use of Functional L-systems for Scenario Generation in Serious Games}, author = { Glenn A. Martin and Charles E. Hughes and Sae Schatz and Denise Nicholson}, url = {http://doi.acm.org/10.1145/1814256.1814262}, doi = {10.1145/1814256.1814262}, isbn = {978-1-4503-0023-0}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the 2010 Workshop on Procedural Content Generation in Games}, pages = {6:1--6:5}, publisher = {ACM}, address = {Monterey, California}, series = {PCGames '10}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Martin2010c, title = {A Scenario Generation Framework for Automating Instructional Support in Scenario-based Training}, author = { Glenn A. Martin and Charles E. Hughes}, url = {http://dx.doi.org/10.1145/1878537.1878574}, doi = {10.1145/1878537.1878574}, isbn = {978-1-4503-0069-8}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the 2010 Spring Simulation Multiconference}, pages = {35:1--35:6}, publisher = {Society for Computer Simulation International}, address = {Orlando, Florida}, series = {SpringSim '10}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{DLBHS10, title = {3D-Manipulationstechnik f}, author = { David Donszik and Bastian Lengert and Gerd Bruder and Klaus H. Hinrichs and Frank Steinicke}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/DLBHS10.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {83--92}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{VSBH10b, title = {A Multi-Touch enabled Human-Transporter Metaphor for Virtual 3D Traveling}, author = {Dimitar Valkov and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/VSBH10b.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI)}, pages = {79--82}, abstract = {In this tech-note we demonstrate how multi-touch hand gestures in combination with foot gestures can be used to perform navigation tasks in interactive 3D environments. Geographic Information Systems (GIS) are well suited as a complex testbed for evaluation of user interfaces based on multi-modal input. Recent developments in the area of interactive surfaces enable the construction of low-cost multi-touch displays and relatively inexpensive sensor technology to detect foot gestures, which allows to explore these input modalities for virtual reality environments.
In this tech-note, we describe an intuitive 3D user interface metaphor and corresponding hardware, which combine multi-touch hand and foot gestures for interaction with spatial data.}, note = {(acceptance rate 25%)}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BZBSHFS10, title = {[POSTER] A Virtual Reality Handball Goalkeeper Analysis System}, author = {Benjamin Bolte and Florian Zeidler and Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs and Lennart Fischer and Jörg Schorer}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BZBSHFS10.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the Joint Virtual Reality Conference (JVRC) (Poster Presentation)}, pages = {1--2}, abstract = {Understanding how professional handball goalkeepers acquire skills to combine decision-making and complex motor tasks is a multidisciplinary challenge. In order to improve a goalkeeper's training by allowing insights into their complex perception, learning and action processes, virtual reality (VR) technologies provide a way to standardize experimental sport situations. In this poster we describe a VR-based handball system, which supports the evaluation of perceptual-motor skills of handball goalkeepers during shots. In order to allow reliable analyses it is essential that goalkeepers can move naturally like they would do in a real game situation, which is often inhibited by wires or markers that are usually used in VR systems. To address this challenge, we developed a camera-based goalkeeper analysis system, which allows to detect and measure motions of goalkeepers in real-time.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BBSHL10, title = {Augmentation Techniques for Efficient Exploration in Head-Mounted Display Environments}, author = {Benjamin Bolte and Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BBSHL10.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)}, pages = {11--18}, abstract = {Physical characteristics and constraints of today's head-mounted displays (HMDs) often impair interaction in immersive virtual environments (VEs). For instance, due to the limited field of view (FOV) subtended by the display units in front of the user's eyes more effort is required to explore a VE by head rotations than for exploration in the real world. In this paper we propose a combination of two augmentation techniques that have the potential to make exploration of VEs more efficient: (1) augmenting the geometric FOV (GFOV) used for rendering the VE, and (2) amplifying head rotations while the user changes her head orientation. In order to identify how much manipulation can be applied without users noticing, we conducted two psychophysical experiments in which we analyzed subjects' ability to discriminate between virtual and real head pitch and roll rotations while three different geometric FOVs were used. Our results show that the combination of both techniques has great potential to support efficient exploration of VEs. We found that virtual pitch and roll rotations can be amplified by 30% and 44% respectively, when the GFOV matches the subject's estimation of the most natural FOV.
This leads to a possible reduction of the user's effort required to explore the VE using a combination of both techniques by approximately 25%.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BSVH10, title = {Augmented Virtual Studio for Architectural Exploration}, author = {Gerd Bruder and Frank Steinicke and Dimitar Valkov and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSVH10.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)}, pages = {1--8}, abstract = {Immersive virtual environments and natural 3D user interfaces have shown great potential in the field of architecture, especially for exploration, presentation and review of designs. In this paper we propose an augmented virtual studio environment for architectural exploration based on a mixed-reality head-mounted display environment. The proposed system supports (1) real walking through large virtual building models, (2) visual feedback about the user's body and (3) display of real-world objects in the virtual view based on color transfer functions. We describe the locomotion user interface for immersive exploration, as well as a mixed-reality 3D user interface for interaction with virtual designs.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BSBH10, title = {Auswirkungen biokularer Videobilder als Selbst-Repr}, author = {Annika Busch and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {157--170}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBHW10, title = {Change Blindness Phenomena for Stereoscopic Projection Systems}, author = {Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Pete Willemsen}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHW10-optimized.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {187--194}, abstract = {In visual perception, change blindness describes the phenomenon that persons viewing a visual scene may apparently fail to detect significant changes in that scene. These phenomena have been observed in both computer generated imagery and real-world scenes. Several studies have demonstrated that change blindness effects occur primarily during visual disruptions such as blinks or saccadic eye movements. However, until now the influence of stereoscopic vision on change blindness has not been studied thoroughly in the context of visual perception research. In this paper we introduce change blindness techniques for stereoscopic projection systems, providing the ability to substantially modify a virtual scene in a manner that is difficult for observers to perceive. We evaluate techniques for passive and active stereoscopic viewing and compare the results to those of monoscopic viewing conditions. For stereoscopic viewing conditions, we find that change blindness phenomena can be applied with a larger magnitude as compared to monoscopic viewing of a scene. 
We have also evaluated the potential of the presented techniques for allowing abrupt, and yet significant, changes of a stereoscopically displayed virtual reality environment.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{SBJFL10, title = {Estimation of Detection Thresholds for Redirected Walking Techniques}, author = {Frank Steinicke and Gerd Bruder and Jason Jerald and Harald Frenz and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBJFL10-optimized.pdf}, year = {2010}, date = {2010-01-01}, journal = {IEEE Transactions on Visualization and Computer Graphics (TVCG)}, volume = {16}, number = {1}, pages = {17--27}, abstract = {In immersive virtual environments users can control their virtual viewpoint by walking through the real world, and movements are mapped one-to-one to virtual camera motions. With redirection techniques, the virtual camera is manipulated by applying gains so that the virtual world moves differently than the real world. We have quantified how much humans can unknowingly be redirected on paths, which are different from the visually perceived paths. We tested 12 subjects in three different psychophysical experiments. In experiment E1, subjects performed rotations with different gains, and then had to choose whether the visually perceived rotation was smaller or greater than the physical rotation. In experiment E2, subjects chose whether the physical walk was shorter or longer than the visually perceived scaled travel distance. In experiment E3, subjects estimate the path curvature when walking a curved path in the real world while the visual display shows a straight path in the virtual world. Our results show that users can be turned physically about 49% more or 20% less than the perceived virtual rotation, distances can be downscaled by 14% and up-scaled by 26%, and users can be redirected on a circular arc with a radius greater than 22m.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BSH10, title = {[POSTER] Estimation of Virtual Interpupillary Distances for Immersive Head-Mounted Displays}, author = {Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSH10.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the ACM Symposium on Applied Perception in Graphics and Visualization (APGV) (Poster Presentation)}, pages = {168}, abstract = {Head-mounted displays (HMDs) allow users to observe virtual environments (VEs) from an egocentric perspective. In order to present a realistic stereoscopic view, the rendering system has to be adjusted to the characteristics of the HMD, e. g., the display's field of view (FOV), as well as to characteristics that are unique for each user, in particular her interpupillary distance (IPD). Typically, the user's IPD is measured, and then applied to the virtual IPD used for rendering, assuming that the HMD's display units are correctly adjusted in front of the user's eyes. A discrepancy between the user's IPD and the virtual IPD may distort the perception of the VE. In this poster we analyze the user's perception of a VE in a HMD environment, which is displayed stereoscopically with different IPDs. We conducted an experiment to identify virtual IPDs that are identified as natural by subjects for different FOVs. 
In our experiment, subjects had to adjust the IPD for a rendered virtual replica of our real laboratory until perception of the virtual replica matched perception of the real laboratory. We found that the virtual IPDs subjects estimate as most natural are often not identical to their IPDs, and that the estimations were affected by the FOV of the HMD and the virtual FOV used for rendering.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{SBHS10, title = {Gradual Transitions and their Effects on Presence and Distance Estimation}, author = {Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Anthony Steed}, year = {2010}, date = {2010-01-01}, journal = {Computers & Graphics}, volume = {34}, number = {1}, pages = {26--33}, abstract = {Several experiments have provided evidence that ego-centric distances are perceived as compressed in immersive virtual environments relative to the real world. The principal factors responsible for this phenomenon have remained largely unknown. However, recent experiments suggest that when the virtual environment (VE) is an exact replica of a user's real physical surroundings, the person's distance perception improves. Based on this observation, it sounds reasonable that if subjects feel a high degree of situational awareness in a known VE, their ability for estimating distances may be much better compared to an unfamiliar virtual world. This raises the question, whether starting the virtual reality (VR) experience in such a virtual replica and gradually transiting to a different VE has potential to increase a person's sense of presence as well as distance perception skills in an unknown virtual world. In this case the virtual replica serves as transitional environment between reality and a virtual world. Although transitional environments are already applied in some VR demonstrations, until now it has not been verified whether such a gradual transition improves a user's VR experience. We have conducted two experiments to quantify to what extent a gradual transition to a virtual world via a transitional environment improves a person's level of presence and ability to estimate distances in the VE. We have found that the subjects' self-reported sense of presence shows significantly higher scores, and that the subjects' distance estimation skills in the VE improved significantly, when they entered the VE via a transitional environment.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BSVH10a, title = {[POSTER] Immersive Virtual Studio for Architectural Exploration}, author = {Gerd Bruder and Frank Steinicke and Dimitar Valkov and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSVH10a-optimized.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI) (Poster Presentation)}, pages = {125--126}, abstract = {Architects use a variety of analog and digital tools and media to plan and design constructions. Immersive virtual reality (VR) technologies have shown great potential for architectural design, especially for exploration and review of design proposals. In this work we propose a virtual studio system, which allows architects and clients to use arbitrary real-world tools such as maps or rulers during immersive exploration of virtual 3D models. 
The user interface allows architects and clients to review designs and compose 3D architectural scenes, combining benefits of mixed-reality environments with immersive head-mounted display setups.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @book{BMC10, title = {JVRC Tutorial: Walking Experiences in Virtual Worlds}, author = {Gerd Bruder and Betty Mohler and Gabriel Cirio}, year = {2010}, date = {2010-01-01}, publisher = {ACM Press}, series = {Course Notes of the Joint Virtual Reality Conference of EuroVR - EGVE - VEC}, abstract = {Active exploration enables us humans to construct a rich and coherent percept of our environment. By far the most natural way to move through the real world is via locomotion like walking or running. The same should also be true for computer generated three-dimensional environments. Keeping such an active and dynamic ability to navigate through large-scale virtual scenes is of great interest for many 3D applications demanding locomotion, such as tourism, architecture or interactive entertainment. However, today it is still mostly impossible to freely walk through computer generated environments in order to actively explore them. The primary reason for this is the scientific and technological underdevelopment in this sector. While moving in the real world, sensory information such as vestibular, proprioceptive, and efferent copy signals as well as visual information create consistent multi-sensory cues that indicate one's own motion, i.e., acceleration, speed and direction of travel. Computer graphics environments were initially restricted to visual displays, combined with interaction devices, e.g. joystick or mouse, for providing (often unnatural) inputs to generate self-motion. Nowadays, more and more interaction devices, e.g., Nintendo's Wii or Sony's EyeToy, enable intuitive and natural interaction. In this context, many research groups are investigating natural, multimodal methods of generating self-motion in virtual worlds based on these consumer hardware. An obvious approach is to transfer the user's tracked head movements to changes of the camera in the virtual world by means of a one-to-one mapping. Then, a one meter movement in the real world is mapped to a one meter movement of the virtual camera in the corresponding direction in the virtual environment (VE). This technique has the drawback that the user's movements are restricted by a limited range of the tracking sensors, e.g. optical cameras, and a rather small workspace in the real world. The size of the virtual world often differs from the size of the tracked space so that a straightforward implementation of omni-directional and unlimited walking is not possible. Thus, concepts for virtual locomotion methods are needed that enable walking over large distances in the virtual world while remaining within a relatively small space in the real world. 
In this tutorial we will present an overview about the development of locomotion interfaces for computer generated virtual environments ranging from desktop-based camera manipulations simulating walking, and different walking metaphors for virtual reality (VR)-based environments to state-of-the-art hardware-based solutions that enable omni-directional and unlimited real walking through virtual worlds.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {book} } @inproceedings{VSBH10a, title = {Navigation through Geospatial Environments with a Multi-Touch enabled Human-Transporter Metaphor}, author = { Dimitar Valkov and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of Geoinformatik}, abstract = {Geospatial environments provide users with complex and detailed 3D data sets. While many different visualization techniques allow valuable insights, intuitive and natural exploration approaches are often missing, and hence usually only domain-experts are able to efficiently explore such data sets. In this paper we demonstrate a virtual reality (VR) based setup for geographic information systems (GIS), which allows users to perform navigation tasks in stereoscopically displayed interactive 3D geospatial environments using multi-touch hand gestures in combination with foot input. Furthermore, we have performed an initial user study in which we analyzed the proposed setup.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBK10, title = {Perception of Perspective Distortions of Man-Made Virtual Objects}, author = { Frank Steinicke and Gerd Bruder and Scott Kuhl}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBK10.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH) (Conference DVD)}, publisher = {ACM Press}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{VSBHSDK10, title = {Touching Floating Objects in Projection-based Virtual Reality Environments}, author = {Dimitar Valkov and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Johannes Schöning and Florian Daiber and Antonio Krüger}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/VSBHSDK10-optimized.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the Joint Virtual Reality Conference (JVRC)}, pages = {17--24}, abstract = {Touch-sensitive screens enable natural interaction without any instrumentation and support tangible feedback on the touch surface. In particular multi-touch interaction has proven its usability for 2D tasks, but the challenges to exploit these technologies in virtual reality (VR) setups have rarely been studied. In this paper we address the challenge to allow users to interact with stereoscopically displayed virtual environments when the input is constrained to a 2D touch surface. During interaction with a large-scale touch display a user changes between three different states: (1) beyond the arm-reach distance from the surface, (2) at arm-reach distance and (3) interaction. We have analyzed the user's ability to discriminate stereoscopic display parallaxes while she moves through these states, i.e., if objects can be imperceptibly shifted onto the interactive surface and become accessible for natural touch interaction. 
Our results show that the detection thresholds for such manipulations are related to both user motion and stereoscopic parallax, and that users have problems to discriminate whether they touched an object or not, when tangible feedback is expected.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{VSBH10, title = {Traveling in 3D Virtual Environments with Foot Gestures and a Multi-Touch enabled WIM}, author = { Dimitar Valkov and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/VSBH10.pdf}, year = {2010}, date = {2010-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)}, pages = {171--180}, abstract = {In this paper we demonstrate how foot gestures can be used to perform navigation tasks in interactive 3D environments and how a World-In-Miniature view can be manipulated through multi-touch gestures, simplifying the way-finding task in such complex environments. Geographic Information Systems (GIS) are well suited as a complex test-bed for evaluation of user interfaces based on multi-modal input. Recent developments in the area of interactive surfaces enable the construction of low-cost multi-touch sensors and relatively inexpensive technology for detecting foot gestures allows exploring these input modalities for virtual reality environments. In this paper, we describe an intuitive 3D user interface setup, which combines multi-touch hand and foot gestures for interaction with spatial data.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @techreport{Johnson2009ac, title = {A Projector-based Physical Sand Table for Tactical Planning and Review}, author = {Tyler Johnson and Herman Towles and Andrei State and Fu-Che Wu and Greg Welch and Anselmo Lastra and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Johnson2009ac.pdf}, year = {2009}, date = {2009-12-01}, number = {TR09-017}, address = {Chapel Hill, NC USA}, institution = {The University of North Carolina at Chapel Hill, Department of Computer Science}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {techreport} } @conference{Sadagic2009aa, title = {New Generation of Instrumented Ranges: Enabling Automated Performance Analysis}, author = {Amela Sadagic and Greg Welch and Chumki Basu and Chris Darken and Rakesh Kumar and Henry Fuchs and Hui Cheng and Jan-Michael Frahm and Mathias Kolsch and Neil Rowe and Herman Towles and Juan Wachs and Anselmo Lastra}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Sadagic2009aa_red.pdf}, year = {2009}, date = {2009-11-01}, booktitle = {Proceedings of 2009 Interservice/Industry Training, Simulation, and Education Conference (I/ITSEC-2009)}, address = {Orlando, Florida, U.S.A.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @article{Lincoln2009ac, title = {Animatronic Shader Lamps Avatars}, author = {Peter Lincoln and Greg Welch and Andrew Nashel and Adrian Ilie and Andrei State and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Lincoln2009ac.pdf}, year = {2009}, date = {2009-10-01}, journal = {Proceedings of 8th IEEE and ACM International Symposium on Mixed and Augmented Reality (ISMAR'09)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Welch2009ac, title = {Tracking for AR Tracking Researchers}, author = {Greg Welch and Gerhard Reitmayr and Vincent Lepetit and Brian Clipp}, year = {2009}, date = {2009-10-01}, journal = {Course
Notes for Tutorial at International Symposium on Mixed and Augmented Reality (ISMAR 2009)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Martin2009, title = {Automatic Scenario Generation through Procedural Modeling for Scenario-Based Training}, author = {Glenn A. Martin and Sae Schatz and Clint Bowers and Charles Hughes and Jennifer Fowlkes and Denise Nicholson}, url = {https://doi.org/10.1177/154193120905302615 }, doi = {10.1177/154193120905302615 }, year = {2009}, date = {2009-10-01}, booktitle = {Human Factors and Ergonomics Society Annual Meeting Proceedings}, volume = {53}, number = {26}, pages = {1949-1953}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Yang2009aa, title = {3D Motion Segmentation Using Intensity Trajectory}, author = {Hua Yang and Greg Welch and Jan-Michael Frahm and Marc Pollefeys}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2009aa.pdf}, year = {2009}, date = {2009-09-01}, journal = {Proceedings of the 9th Asian Conference on Computer Vision (ACCV 2009)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inbook{HughesD2010, title = {Virtual Reality}, author = {Darin E. Hughes and Eileen Smith and Randall Shumaker and Charles Hughes}, editor = {Constantine Stephanidis}, doi = {10.1201/9781420064995-c12}, isbn = {978-0-8058-6280-5}, year = {2009}, date = {2009-06-11}, urldate = {2016-12-05}, booktitle = {Universal Access Handbook}, issuetitle = {Human Factors and Ergonomics}, pages = {12-1:12-10}, publisher = {CRC Press}, chapter = {12}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @article{Beato2009, title = {Interactive Chroma-keying for Mixed Reality}, author = {Nicholas Beato and Yunjun Zhang and Mark Colbert and Kazumasa Yamazawa and Charles Hughes}, doi = {10.1002/cav.305}, year = {2009}, date = {2009-06-01}, journal = {Computer Animation and Virtual Worlds }, volume = {20}, number = {2-3}, pages = {405-415}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Lincoln2009aa, title = {Multi-View Lenticular Display for Group Teleconferencing}, author = {Peter Lincoln and Andrew Nashel and Adrian Ilie and Herman Towles and Gregory Welch and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Lincoln2009aa.pdf}, year = {2009}, date = {2009-05-01}, journal = {Proceedings of IMMERSCOM 2009}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @conference{Walters2009, title = {Shadows of Canaveral: The Application of VR to a Post World War II Subject}, author = {Lori C. Walters and Charles Hughes and Eileen Smith}, url = {http://archive.caaconference.org/2009/articles/Walters_Contribution313_c%20(2).pdf}, isbn = {9781407305561}, year = {2009}, date = {2009-03-22}, urldate = {2016-12-22}, booktitle = {Making History Interactive. 
Computer Applications and Quantitative Methods in Archaeology (CAA)}, issuetitle = {Proceedings of the 37th International Conference, Williamsburg, Virginia, United States of America, March 22-26}, publisher = {Archaeopress, Oxford}, series = {BAR International Series S2079}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {conference} } @article{Johnson2009aa, title = {A Distributed Cooperative Framework for Continuous Multi-Projector Pose Estimation}, author = {Tyler Johnson and Greg Welch and Eric {La Force} and Herman Towles and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Johnson2009aa.pdf}, year = {2009}, date = {2009-03-01}, journal = {Proceedings of IEEE Virtual Reality 2009}, address = {Lafayette, Louisiana}, organization = {IEEE Computer Society}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Welch2009aa, title = {HISTORY: The Use of the Kalman Filter for Human Motion Tracking in Virtual Reality}, author = {Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2009aa.pdf}, year = {2009}, date = {2009-01-01}, journal = {Presence: Teleoperators and Virtual Environments}, volume = {18}, number = {1}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Welch2009ab, title = {3D Medical Collaboration Technology to Enhance Emergency Healthcare}, author = {Gregory Welch and Diane Sonnenwald and Henry Fuchs and Bruce Cairns and Ketan Mayer-Patel and Hanna Söderholm and Ruigang Yang and Andrei State and Herman Towles and Adrian Ilie and Manoj Ampalam and Srinivas Krishnan and Vincent Noel and Michael Noland and James Manning}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2009ab.pdf}, year = {2009}, date = {2009-01-01}, journal = {J Biomed Discov Collab}, volume = {4}, pages = {4}, abstract = {Two-dimensional (2D) videoconferencing has been explored widely in the past 15-20 years to support collaboration in healthcare. Two issues that arise in most evaluations of 2D videoconferencing in telemedicine are the difficulty obtaining optimal camera views and poor depth perception. To address these problems, we are exploring the use of a small array of cameras to reconstruct dynamic three-dimensional (3D) views of a remote environment and of events taking place within. The 3D views could be sent across wired or wireless networks to remote healthcare professionals equipped with fixed displays or with mobile devices such as personal digital assistants (PDAs). The remote professionals' viewpoints could be specified manually or automatically (continuously) via user head or PDA tracking, giving the remote viewers head-slaved or hand-slaved virtual cameras for monoscopic or stereoscopic viewing of the dynamic reconstructions. We call this idea remote 3D medical collaboration. In this article we motivate and explain the vision for 3D medical collaboration technology; we describe the relevant computer vision, computer graphics, display, and networking research; we present a proof-of-concept prototype system; and we present evaluation results supporting the general hypothesis that 3D remote medical collaboration technology could offer benefits over conventional 2D videoconferencing in emergency healthcare.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Fiore2009, title = {Virtual experiments and environmental policy}, author = {Stephen M. Fiore and Glenn W. Harrison and Charles Hughes and E.
Elisabet Rutström }, url = {http://dx.doi.org/10.1016/j.jeem.2008.08.002}, doi = {10.1016/j.jeem.2008.08.002}, year = {2009}, date = {2009-01-01}, journal = {Journal of Environmental Economics and Management}, volume = {57}, number = {1}, pages = {65-86}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Varcholik2009, title = {The Bespoke 3DUI XNA Framework: A Low-cost Platform for Prototyping 3D Spatial Interfaces in Video Games}, author = { Paul D. Varcholik and Joseph J. {LaViola Jr.} and Charles Hughes}, url = {http://doi.acm.org/10.1145/1581073.1581082}, doi = {10.1145/1581073.1581082}, isbn = {978-1-60558-514-7}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the 2009 ACM SIGGRAPH Symposium on Video Games}, pages = {55--61}, publisher = {ACM}, address = {New Orleans, Louisiana}, series = {Sandbox '09}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Beato2009b, title = {Evaluating the Potential of Cognitive Rehabilitation with Mixed Reality}, author = { Nicholas Beato and Daniel P. Mapes and Charles E. Hughes and Cali Fidopiastis and Eileen Smith}, editor = {Randall Shumaker}, url = {http://dx.doi.org/10.1007/978-3-642-02771-0_58}, doi = {10.1007/978-3-642-02771-0_58}, isbn = {978-3-642-02771-0}, year = {2009}, date = {2009-01-01}, booktitle = {Virtual and Mixed Reality: Third International Conference, VMR 2009, Held as Part of HCI International 2009, San Diego, CA, USA, July 19-24, 2009. Proceedings}, volume = {5622}, pages = {522--531}, publisher = {Springer Berlin Heidelberg}, address = {Berlin, Heidelberg}, series = {Lecture Notes in Computer Science}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{Risi2009, title = {How Novelty Search Escapes the Deceptive Trap of Learning to Learn}, author = { Sebastian Risi and Sandy D. Vanderbleek and Charles E. Hughes and Kenneth O. Stanley}, url = {http://doi.acm.org/10.1145/1569901.1569923}, doi = {10.1145/1569901.1569923}, isbn = {978-1-60558-325-9}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the 11th Annual Conference on Genetic and Evolutionary Computation}, pages = {153--160}, publisher = {ACM}, address = {Montreal, Quebec, Canada}, series = {GECCO '09}, keywords = {A-ceh}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Fidopiastis2009, title = {Mixed Reality for PTSD/TBI Assessment}, author = {Cali Fidopiastis and Charles Hughes and Eileen Smith}, url = {http://www.arctt.info/volume-7-summer-2009}, year = {2009}, date = {2009-01-01}, urldate = {2016-12-22}, booktitle = {Annual Review of Cybertherapy and Telemedicine 2009}, issuetitle = {Advanced Technologies in the Behavioral, Social and Neurosciences}, volume = {7}, number = {Summer 2009}, pages = {216-220}, publisher = {IOS Press BV}, address = {Amsterdam}, chapter = {50}, series = {Studies in Health Technology and Informatics}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inbook{Salva2009, title = {Cognitive Therapy using Mixed Reality for those impaired by a Cerebrovascular Accident (CVA)}, author = {Angela Salva and Brenda K. Wiederhold and Antonio J. Alban and Charles E. Hughes and Eileen Smith and Cali Fidopiastis and Mark D.
Wiederhold}, url = {http://www.arctt.info/volume-7-summer-2009}, year = {2009}, date = {2009-01-01}, urldate = {2016-12-22}, booktitle = {Annual Review of Cybertherapy and Telemedicine, 2009}, issuetitle = {Advanced Technologies in the Behavioral, Social, and Neurosciences}, volume = {7}, number = {Summer 2009}, pages = {253-256}, publisher = {IOS Press BV}, address = {Amsterdam}, chapter = {58}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{BSH09, title = {Arch-Explore: A Natural User Interface for Immersive Architectural Walkthroughs}, author = { Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSH09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI)}, pages = {75--82}, publisher = {IEEE Press}, abstract = {In this paper we propose the Arch-Explore user interface, which supports natural exploration of architectural 3D models at different scales in a real walking virtual reality (VR) environment such as head-mounted display (HMD) or CAVE setups. We discuss in detail how user movements can be transferred to the virtual world to enable walking through virtual indoor environments. To overcome the limited interaction space in small VR laboratory setups, we have implemented redirected walking techniques to support natural exploration of comparably large-scale virtual models. Furthermore, the concept of virtual portals provides a means to cover long distances intuitively within architectural models. We describe the software and hardware setup and discuss benefits of Arch-Explore.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BSBBSH09, title = {Darstellung physikalischer Objekte in Immersiven Head-Mounted Display Umgebungen}, author = { Annika Busch and Marius Staggenborg and Tobias Brix and Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSBBSH09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {233--244}, publisher = {Shaker Verlag}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBSHG09, title = {Does a Gradual Transition to the Virtual World increase Presence?}, author = { Frank Steinicke and Gerd Bruder and Anthony Steed and Klaus H. Hinrichs and Alexander Gerlach}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBSHG09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {203--210}, publisher = {IEEE Press}, abstract = {In order to increase a user's sense of presence in an artificial environment some researchers propose a gradual transition from reality to the virtual world instead of immersing users into the virtual world directly. One approach is to start the VR experience in a virtual replica of the physical space to accustom users to the characteristics of VR, e.g., latency, reduced field of view or tracking errors, in a known environment. Although this procedure is already applied in VR demonstrations, until now it has not been verified whether the usage of such a transitional environment - as transition between real and virtual environment - increases someone's sense of presence. 
We have observed subjective, physiological and behavioral reactions of subjects during a fully-immersive flight phobia experiment under two different conditions: the virtual flight environment was displayed immediately, or subjects visited a transitional environment before entering the virtual flight environment. We have quantified to what extent a gradual transition to the VE via a transitional environment increases the level of presence. We have found that subjective responses show significantly higher scores for the user's sense of presence, and that subjects' behavioral reactions change when a transitional environment is shown first. Considering physiological reactions, no significant difference could be found.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{BSRH09, title = {Enhancing Presence in Head-mounted Display Environments by Visual Body Feedback using Head-mounted Cameras}, author = { Gerd Bruder and Frank Steinicke and Kai Rothaus and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSRH09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the International Conference on CyberWorlds}, pages = {43--50}, publisher = {IEEE Press}, abstract = {A fully-articulated visual representation of a user in an immersive virtual environment (IVE) can enhance the user's subjective sense of feeling present in the virtual world. Usually this means that a user has to wear a full-body motion capture suit to track real-world body motions and map them to a virtual body model. In this paper we present an augmented virtuality approach that allows to incorporate a realistic view of oneself in virtual environments using cameras attached to head mounted displays. The described system can easily be integrated into typical virtual reality setups. Egocentric camera images captured by a video-see-through system are segmented in real-time into foreground, showing parts of the user's body, e. g., her hands or feet, and background. The segmented foreground is then displayed as inset in the user's current view of the virtual world. Thus the user is able to see her physical body in an arbitrary virtual world, including individual characteristics such as skin pigmentation, hairiness etc.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBKWLH09, title = {Judgment of Natural Perspective Projections in Head-Mounted Display Environments}, author = { Frank Steinicke and Gerd Bruder and Scott Kuhl and Pete Willemsen and Markus Lappe and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBKWLH09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)}, pages = {35--42}, publisher = {ACM Press}, abstract = {The display units integrated in today's head-mounted displays (HMDs) provide only a limited field of view (FOV) to the virtual world. In order to present an undistorted view to the virtual environment (VE), the perspective projection used to render the VE has to be adjusted to the limitations caused by the HMD characteristics. In particular, the geometric field of view (GFOV), which defines the virtual aperture angle used for rendering of the 3D scene, is set up according to the display's field of view. A discrepancy between these two fields of view distorts the geometry of the VE in such a way that objects and distances appear to be "warped".
Although discrepancies between the geometric and the HMD's field of view affect a person's perception of space, the resulting mini- and magnification of the displayed scene can be useful in some applications and may improve specific aspects of immersive virtual environments, for example, distance judgment, presence, and visual search task performance. In this paper we analyze if a user is consciously aware of perspective distortions of the VE displayed in the HMD. We introduce a psychophysical calibration method to determine the HMD's actual field of view, which may vary from the nominal values specified by the manufacturer. Furthermore, we conducted an experiment to identify perspective projections for HMDs, which are perceived as natural by subjects, even if these perspectives deviate from the perspectives that are inherently defined by the display's field of view. We found that subjects evaluate a field of view as natural when it is larger than the actual field of view of the HMD; in some cases up to 50%.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBRH09a, title = {[POSTER] A Virtual Body for Augmented Virtuality by Chroma-Keying of Egocentric Videos}, author = { Frank Steinicke and Gerd Bruder and Kai Rothaus and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRH09a.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI) (Poster Presentation)}, pages = {125--126}, publisher = {IEEE Press}, abstract = {A fully-articulated visual representation of oneself in an immersive virtual environment has considerable impact on the subjective sense of presence in the virtual world. Therefore, many approaches address this challenge and incorporate a virtual model of the user's body in the VE. Such a virtual body (VB) is manipulated according to user motions which are defined by feature points detected by a tracking system. The required tracking devices are unsuitable in scenarios which involve multiple persons simultaneously or in which participants frequently change. Furthermore, individual characteristics such as skin pigmentation, hairiness or clothes are not considered by this procedure. In this paper we present a software-based approach that allows incorporating a realistic visual representation of oneself in the VE. The idea is to make use of images captured by cameras that are attached to video-see-through head-mounted displays. These egocentric frames can be segmented into foreground showing parts of the human body and background. Then the extremities can be overlaid with the user's current view of the virtual world, and thus a high-fidelity virtual body can be visualized.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBHS09, title = {Presence-Enhancing Real Walking User Interface for First-Person Video Games}, author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Anthony Steed}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHS09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Game Papers}, publisher = {ACM Press}, abstract = {For most first-person video games it is important that players have a high level of feeling presence in the displayed game environment.
Virtual reality (VR) technologies have enormous potential to enhance gameplay since players can experience the game immersively from the perspective of the player's virtual character. However, the VR technology itself, such as tracking devices and cabling, has until recently restricted the ability of users to really walk over long distances. In this paper we introduce a VR-based user interface for presence-enhancing gameplay with which players can explore the game environment in the most natural way, i.e., by real walking. While the player walks through the virtual game environment, we guide him/her on a physical path which is different from the virtual path and fits into the VR laboratory space. In order to further increase the VR experience, we introduce the concept of transitional environments. Such a transitional environment is a virtual replica of the laboratory environment, where the VR experience starts and which enables a gradual transition to the game environment. We have quantified how much humans can unknowingly be redirected and whether or not a gradual transition to a first-person game via a transitional environment increases the user's sense of presence.}, note = {(acceptance rate 25%)}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{SBHJFL09, title = {Real Walking through Virtual Environments by Redirection Techniques}, author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Jason Jerald and Harald Frenz and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHJFL09.pdf}, year = {2009}, date = {2009-01-01}, journal = {Journal of Virtual Reality and Broadcasting (JVRB)}, volume = {6}, number = {2}, pages = {1--16}, abstract = {We present redirection techniques that support exploration of large-scale virtual environments (VEs) by means of real walking. We quantify to what degree users can unknowingly be redirected in order to guide them through VEs in which virtual paths differ from the physical paths. We further introduce the concept of dynamic passive haptics by which any number of virtual objects can be mapped to real physical proxy props having similar haptic properties (i.e., size, shape, and surface structure), such that the user can sense these virtual objects by touching their real world counterparts. Dynamic passive haptics provides the user with the illusion of interacting with a desired virtual object by redirecting her to the corresponding proxy prop. We describe the concepts of generic redirected walking and dynamic passive haptics and present experiments in which we have evaluated these concepts. Furthermore, we discuss implications that have been derived from a user study, and we present approaches that derive physical paths which may vary from the virtual counterparts.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {article} } @inproceedings{BSHL09, title = {Reorientation during Body Turns}, author = { Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSHL09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the Joint Virtual Reality Conference (JVRC)}, pages = {145--152}, abstract = {Immersive virtual environment (IVE) systems allow users to control their virtual viewpoint by moving their tracked head and by walking through the real world, but usually the virtual space which can be explored by walking is restricted to the size of the tracked space of the laboratory.
However, as the user approaches an edge of the tracked walking area, reorientation techniques can be applied to imperceptibly turn the user by manipulating the mapping between real-world body turns and virtual camera rotations. With such reorientation techniques, users can walk through large-scale IVEs while physically remaining in a reasonably small workspace. In psychophysical experiments we have quantified how much users can unknowingly be reoriented during body turns. We tested 18 subjects in two different experiments. First, in a just-noticeable difference test subjects had to perform two successive body turns between which they had to discriminate. In the second experiment subjects performed body turns that were mapped to different virtual camera rotations. Subjects had to estimate whether the visually perceived rotation was slower or faster than the physical rotation. Our results show that the detection thresholds for reorientation as well as the point of subjective equality between real movement and visual stimuli depend on the virtual rotation angle.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBHLRI09, title = {Transitional Environments Enhance Distance Perception in Immersive Virtual Reality Systems}, author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Markus Lappe and Brian Ries and Victoria Interrante}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHLRI09.pdf}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the ACM Symposium on Applied Perception in Graphics and Visualization (APGV)}, pages = {19--26}, publisher = {ACM Press}, abstract = {Several experiments have provided evidence that ego-centric distances are perceived as compressed in immersive virtual environments relative to the real world. The principal factors responsible for this phenomenon have remained largely unknown. However, recent experiments suggest that when the virtual environment (VE) is an exact replica of a user's real physical surroundings, the person's distance perception improves. Furthermore, it has been shown that when users start their virtual reality (VR) experience in such a virtual replica and then gradually transition to a different VE, their sense of presence in the actual virtual world increases significantly. In this case the virtual replica serves as a transitional environment between the real and virtual world. In this paper we examine whether a person's distance estimation skills can be transferred from a transitional environment to a different VE. We have conducted blind walking experiments to analyze if starting the VR experience in a transitional environment can improve a person's ability to estimate distances in an immersive VR system. We found that users significantly improve their distance estimation skills when they enter the virtual world via a transitional environment.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBRH09, title = {POSTER: Visual Identity from Egocentric Camera Images for Head-Mounted Display Environments}, author = { Frank Steinicke and Gerd Bruder and Kai Rothaus and Klaus H. 
Hinrichs}, year = {2009}, date = {2009-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC) (Poster Proceedings)}, pages = {289--290}, abstract = {A number of researchers have reported that a fully-articulated visual representation of oneself in an immersive virtual environment (IVE) has considerable impact on social interaction and the subjective sense of presence in the virtual world. Therefore, many approaches address this challenge and incorporate a virtual model of the user’s body in the VE. Usually, a fully-articulated visual identity, or so-called “virtual body”, is manipulated according to user motions which are defined by feature points detected by a tracking system. Therefore, markers have to be attached to certain feature points as done, for instance, with full-body motion coats which have to be worn by the user. Such instrumentation is unsuitable in scenarios which involve multiple persons simultaneously or in which participants frequently change. Furthermore, individual characteristics such as skin pigmentation, hairiness or clothes are not considered by this procedure where the tracked data is always mapped to the same invariant 3D model. In this paper we present a software-based approach that allows incorporating a realistic visual identity of oneself in the VE, which can be integrated easily into existing hardware setups. In our setup we focus on visual representation of a user's arms and hands. The idea is to make use of images captured by cameras that are attached to video-see-through head-mounted displays. These egocentric frames can be segmented into foreground showing parts of the human body, i.e., the human's hands, and background. Then the extremities can be overlaid with the user's current view of the virtual world, and thus a high-fidelity virtual body can be visualized.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @incollection{Welch2008ab, title = {Tracking for Training in Virtual Environments: Estimating the Pose of People and Devices for Simulation and Assessment}, author = {Greg Welch and Larry Davis}, editor = {Joseph Cohn and Denise Nicholson and Dylan Schmorrow}, url = {https://www.amazon.com/Handbook-Virtual-Environments-Training-Education/dp/0313351651 https://sreal.ucf.edu/wp-content/uploads/2018/02/Welch2008aa-pre.pdf}, doi = {10.1336/0313351651}, year = {2008}, date = {2008-11-01}, booktitle = {The PSI Handbook of Virtual Environments for Training and Education: Developments for the Military and Beyond}, publisher = {Praeger Security International}, chapter = {30}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @inbook{HughesD2008, title = {The Application and Evaluation of Mixed Reality Simulation}, author = {Darin E. Hughes and C. Jerome and Charles Hughes and Eileen Smith}, editor = {Joseph Cohn and Denise Nicholson and Dylan Schmorrow}, year = {2008}, date = {2008-11-01}, booktitle = {The PSI Handbook of Virtual Environments for Training and Education: Developments for the Military and Beyond}, volume = {3}, pages = {254-277}, publisher = {Praeger Security International}, address = {Westport, Connecticut}, chapter = {25}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inbook{Walters2008, title = {The Future of Museum Experiences}, author = {Lori C.
Walters and Eileen Smith and Charles Hughes}, editor = {Joseph Cohn and Denise Nicholson and Dylan Schmorrow }, year = {2008}, date = {2008-11-01}, booktitle = {The PSI Handbook of Virtual Environments for Training and Education: Developments for the Military and Beyond}, volume = {3}, pages = {444-452}, publisher = {Praeger Security International}, address = {Westport, CT}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{Walters2008b, title = {Come Back to the Fair}, author = {Lori C. Walters and Charles Hughes and Eileen Smith}, url = {http://eproc.vsmm2008.org/vsmm2008/e_Proceedings/papers/projectpapers.pdf#page=295}, year = {2008}, date = {2008-10-20}, urldate = {2016-12-22}, booktitle = {Digital Heritage - Proceedings of the 14th International Conference on Virtual Systems and Multimedia (VSMM 08), Limassol, Cyprus, October 20-26, 2008 }, pages = {289-293}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Ilie2008aa, title = {A Stochastic Quality Metric for Optimal Control of Active Camera Network Configurations for 3D Computer Vision Tasks}, author = {Adrian Ilie and Greg Welch and Marc Macenko}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Ilie2008aa.pdf}, year = {2008}, date = {2008-10-01}, booktitle = {Proceedings of ECCV 2008 workshop on Multi-camera and Multi-modal Sensor Fusion Algorithms and Applications}, address = {Marseille, France}, organization = {European Conference on Computer Vision (ECCV)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @article{Soderholm2008aa, title = {Exploring the potential of video technologies for collaboration in emergency medical care. Part II: Task performance}, author = {Hanna Söderholm and Diane Sonnenwald and James Manning and Bruce Cairns and Gregory Welch and Henry Fuchs}, url = {http://www3.interscience.wiley.com/journal/121378034/abstract https://sreal.ucf.edu/wp-content/uploads/2017/02/Soderholm2008aa.pdf}, doi = {10.1002/asi.20939}, year = {2008}, date = {2008-08-01}, journal = {Journal of the American Society for Information Science and Technology}, volume = {59}, number = {14}, pages = {2335--2349}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Sonnenwald2008aa, title = {Exploring the potential of video technologies for collaboration in emergency medical care. 
Part I: Information sharing}, author = {Diane Sonnenwald and Hanna Söderholm and James Manning and Bruce Cairns and Gregory Welch and Henry Fuchs}, url = {http://www3.interscience.wiley.com/journal/121378032/abstract?CRETRY=1&SRETRY=0 https://sreal.ucf.edu/wp-content/uploads/2017/02/Sonnenwald2008aa.pdf}, doi = {10.1002/asi.20934}, year = {2008}, date = {2008-08-01}, journal = {Journal of the American Society for Information Science and Technology}, volume = {59}, number = {14}, pages = {2320--2334}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Fidopiastis2008, title = {Workshop 1: Use of psychophysiological measures in virtual rehabilitation}, author = {Cali Fidopiastis and Charles Hughes}, doi = {10.1109/ICVR.2008.4625109}, issn = {2331-9542}, year = {2008}, date = {2008-08-01}, booktitle = {2008 Virtual Rehabilitation}, pages = {xi-xi}, keywords = {A-ceh}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Clipp2008, title = {A Mobile 3D City Reconstruction System}, author = {Brian Clipp and Rahul Raguram and Jan-Michael Frahm and Gregory Welch and Marc Pollefeys}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Clipp2008.pdf}, year = {2008}, date = {2008-03-09}, booktitle = {Workshop on Virtual Cityscapes, IEEE Virtual Reality}, address = {Reno, Nevada, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Dieker2008, title = {Implications of Mixed Reality and Simulation Technologies on Special Education and Teacher Preparation}, author = {Lisa Dieker and Michael Hynes and Charles Hughes and Eileen Smith}, year = {2008}, date = {2008-02-01}, journal = {Focus on Exceptional Children }, volume = {40}, number = {6}, pages = {1-20}, publisher = {Publishing Co}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Pollefeys2008, title = {Detailed Real-Time Urban 3D Reconstruction from Video}, author = {Marc Pollefeys and David Nistér and Jan-Michael Frahm and Amir Akbarzadeh and Philippos Mordohai and Brian Clipp and Chris Engels and David Gallup and Seon-Joo Kim and Paul Merrell and C. Salmi and Sudipta Sinha and Brad Talton and Liang Wang and Qing-Xiong Yang and Henrik Stewénius and Ruigang Yang and Greg Welch and Herman Towles}, url = {http://dx.doi.org/10.1007/s11263-007-0086-4 https://sreal.ucf.edu/wp-content/uploads/2017/02/Pollefeys2008.pdf}, doi = {10.1007/s11263-007-0086-4}, issn = {1573-1405}, year = {2008}, date = {2008-01-01}, journal = {International Journal of Computer Vision}, volume = {78}, number = {2}, pages = {143--167}, abstract = {The paper presents a system for automatic, geo-registered, real-time 3D reconstruction from video of urban scenes. The system collects video streams, as well as GPS and inertia measurements in order to place the reconstructed models in geo-registered coordinates. It is designed using current state of the art real-time modules for all processing steps. It employs commodity graphics hardware and standard CPU's to achieve real-time performance. We present the main considerations in designing the system and the steps of the processing pipeline. Our system extends existing algorithms to meet the robustness and variability necessary to operate out of the lab. To account for the large dynamic range of outdoor videos the processing pipeline estimates global camera gain changes in the feature tracking stage and efficiently compensates for these in stereo estimation without impacting the real-time performance. 
The required accuracy for many applications is achieved with a two-step stereo reconstruction process exploiting the redundancy across frames. We show results on real video sequences comprising hundreds of thousands of frames.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Brunnett2008, title = {08231 Abstracts Collection -- Virtual Realities}, author = {Guido Brunnett and Sabine Coquillart and Greg Welch}, editor = {Guido Brunnett and Sabine Coquillart and Greg Welch}, url = {http://drops.dagstuhl.de/opus/volltexte/2008/1634 http://drops.dagstuhl.de/opus/volltexte/2008/1634/pdf/08231_abstracts_collection.1634.pdf https://sreal.ucf.edu/wp-content/uploads/2017/02/Brunnett2008.pdf}, issn = {1862-4405}, year = {2008}, date = {2008-01-01}, booktitle = {Virtual Realities}, number = {08231}, publisher = {Schloss Dagstuhl - Leibniz-Zentrum fuer Informatik, Germany}, address = {Dagstuhl, Germany}, series = {Dagstuhl Seminar Proceedings}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SWBH08, title = {[POSTER] A User Guidance Approach for Passive Haptic Environments}, author = { Frank Steinicke and Hanno Weltzel and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SWBH08.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the Eurographics Symposium on Virtual Environments (EGVE) (Short Paper and Poster Proceedings)}, pages = {31--34}, abstract = {Traveling through virtual environments (VEs) by means of real walking is a challenging task since usually the size of the virtual world exceeds the size of the tracked interaction space. Redirected walking is one concept to solve this problem by guiding the user on a physical path which differs from the path the user visually perceives, for example, in head-mounted display (HMD) environments. The user can be redirected to certain locations in the physical space, in particular to real proxy objects which provide passive feedback. In such passive haptic environments, any number of virtual objects can be mapped to proxy objects having similar haptic properties, i.e., size, shape and surface structure. When the user is guided to corresponding proxy objects, s/he can sense virtual objects by touching their real world counterparts. Therefore it is vital to predict the user's movements in the virtual world in order to recognize the target location. Based on the prediction a transformed path can be determined in the physical space on which the user is guided to the desired proxy object. In this paper we present concepts for how a user's path can be predicted reliably and how a corresponding path to a desired proxy object can be derived on which the user does not observe inconsistencies between vision and proprioception.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{SBHRL08, title = {Advances in Human-Computer Interaction: 3D User Interfaces for Collaborative Works}, author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Timo Ropinski and Mario Lopes}, year = {2008}, date = {2008-01-01}, pages = {279--294}, publisher = {In-Tech}, abstract = {Desktop environments have proven to be a powerful user interface and are used as the de facto standard human-computer interaction paradigm for over 20 years. However, there is a rising demand for 3D applications dealing with complex datasets, which exceeds the possibilities provided by traditional devices or two-dimensional displays.
For these domains more immersive and intuitive interfaces are required. But in order to get the users' acceptance, technology-driven solutions that require inconvenient instrumentation, e.g., stereo glasses or tracked gloves, should be avoided. Autostereoscopic display environments equipped with tracking systems enable users to experience 3D virtual environments more naturally without annoying devices, for instance via gestures. However, currently these approaches are only applied for specially designed or adapted applications without universal usability. Although these systems provide enough space to support multiple users, additional costs and inconvenient instrumentation hinder acceptance of these user interfaces. In this chapter we introduce new collaborative 3D user interface concepts for such setups where minimal instrumentation of the user is required such that the strategies can be easily integrated in everyday working environments. Therefore, we propose an interaction system and framework, which allows displaying and interacting with both mono- as well as stereoscopic content in parallel. Furthermore, the setup enables multiple users to view the same data simultaneously. The challenges for combined mouse-, keyboard- and gesture-based input paradigms in such an environment are pointed out and novel interaction strategies are introduced.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{SBJFL08, title = {Analyses of Human Sensitivity to Redirected Walking}, author = { Frank Steinicke and Gerd Bruder and Jason Jerald and Harald Frenz and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBJFL08.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)}, pages = {149--156}, abstract = {Redirected walking allows users to walk through large-scale immersive virtual environments (IVEs) while physically remaining in a reasonably small workspace by intentionally injecting scene motion into the IVE. In a constant stimuli experiment with a two-alternative-forced-choice task we have quantified how much humans can unknowingly be redirected on virtual paths which are different from the paths they actually walk. 18 subjects have been tested in four different experiments: (E1a) discrimination between virtual and physical rotation, (E1b) discrimination between two successive rotations, (E2) discrimination between virtual and physical translation, and discrimination of walking direction (E3a) without and (E3b) with start-up. In experiment E1a subjects performed rotations to which different gains have been applied, and then had to choose whether or not the visually perceived rotation was greater than the physical rotation. In experiment E1b subjects discriminated between two successive rotations where different gains have been applied to the physical rotation. In experiment E2 subjects chose if they thought that the physical walk was longer than the visually perceived scaled travel distance. In experiment E3a subjects walked a straight path in the IVE which was physically bent to the left or to the right, and they estimated the direction of the curvature. In experiment E3a the gain was applied immediately, whereas the gain was applied after a start-up of two meters in experiment E3b.
Our results show that users can be turned physically about 68% more or 10% less than the perceived virtual rotation, distances can be up- or down-scaled by 22%, and users can be redirected on a circular arc with a radius greater than 24 meters while they believe they are walking straight.}, note = {(acceptance rate 17%)}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBRH08a, title = {Moving Towards Generally Applicable Redirected Walking}, author = { Frank Steinicke and Gerd Bruder and Timo Ropinski and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRH08a.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)}, pages = {15--24}, publisher = {IEEE Press}, abstract = {Walking is the most natural way of moving within a virtual environment (VE). Mapping the user's movement one-to-one to the real world clearly has the drawback that the limited range of the tracking sensors and a rather small working space in the real world restrict the user's interaction. In this paper we introduce concepts for virtual locomotion interfaces that support exploration of large-scale virtual environments by redirected walking. Based on the results of a user study we have quantified to what degree users can unknowingly be redirected in order to guide them through an arbitrarily sized VE in which virtual paths differ from the paths tracked in the real working space. We describe the concepts of generic redirected walking in detail and present implications that have been derived from the initially conducted user study. Furthermore we discuss example applications from different domains in order to point out the benefits of our approach.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBRHFL08a, title = {[POSTER] A Universal Virtual Locomotion System: Supporting Generic Redirected Walking and Dynamic Passive Haptics within Legacy 3D Graphics Applications}, author = { Frank Steinicke and Gerd Bruder and Timo Ropinski and Klaus H. Hinrichs and Harald Frenz and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRHFL08a.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR) (Poster Presentation)}, pages = {192--193}, publisher = {IEEE Press}, abstract = {General interest in visualizations of digital 3D cityscapes is growing rapidly, and several applications are already available that display such models very realistically. In order to generate a virtual 3D city model different approaches exist which use miscellaneous input data sources, for example, 2D maps, aerial images, or laser-scanned data. However, 3D landmarks which denote highly-complex and architecturally prominent buildings, e.g., churches, castles, spires etc., cannot be reproduced in an adequate manner by automatic reconstruction. Therefore, these entities are usually modeled manually by architectural offices or 3D design companies. In recent years user interfaces of 3D modeling applications have evolved such that these applications are widely accepted and easy to use, even for non-experts. In this paper we present a field report on the manual modeling of 3D landmarks in which two classes have participated. In cooperation with two different schools we have performed two projects with eighth- and ninth-grade students. In each project every student has chosen a particular 3D building of sufficient complexity to model.
All models have been integrated into our city visualization environment. We present the results as well as the experience that we have gained during this project.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBRHFL08, title = {[POSTER] Generic Redirected Walking & Dynamic Passive Haptics: Evaluation and Implications for Virtual Locomotion Interfaces}, author = { Frank Steinicke and Gerd Bruder and Timo Ropinski and Klaus H. Hinrichs and Harald Frenz and Markus Lappe}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRHFL08.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI) (Poster Presentation)}, pages = {147--148}, publisher = {IEEE Press}, abstract = {In this paper we introduce a virtual locomotion system that allows navigation within any large-scale virtual environment (VE) by real walking. Based on the results of a user study we have quantified how much users can unknowingly be redirected in order to guide them through an arbitrarily sized VE in which virtual paths differ from the paths tracked in the real working space. Furthermore we introduce the new concept of dynamic passive haptics. This concept allows mapping any number of virtual objects to real proxy objects having similar haptic properties, i.e., size, shape and surface structure, such that the user can sense these virtual objects by touching their real world counterparts. This mapping may be changed dynamically during runtime and need not be one-to-one. Thus dynamic passive haptics provides the user with the illusion of interacting with a desired virtual object by redirecting her/him to the corresponding proxy object. Since the mapping between virtual and proxy objects can be changed dynamically, a small number of proxy objects suffices to represent a much larger number of virtual objects. We describe the concepts in detail and discuss their parameterization which has been derived from the initially conducted user study. Furthermore we explain technical details regarding the integration into legacy 3D graphics applications, which is based on an interceptor library for tracing and modifying 3D graphics calls. Thus when the user is tracked s/he is able to explore any 3D scene by natural walking, which we demonstrate by 3D graphics applications from different domains.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBKJH08, title = {Taxonomy and Implementation of Redirection Techniques for Ubiquitous Passive Haptic Feedback}, author = { Frank Steinicke and Gerd Bruder and Luv Kohli and Jason Jerald and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBKJH08.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the International Conference on Cyberworlds (CW)}, pages = {217--223}, publisher = {IEEE Press}, abstract = {Traveling through immersive virtual environments (IVEs) by means of real walking is an important activity to increase naturalness of VR-based interaction. However, the size of the virtual world often exceeds the size of the tracked space so that a straightforward implementation of omni-directional and unlimited walking is not possible. Redirected walking is one concept to solve this problem of walking in VEs by inconspicuously guiding the user on a physical path that may differ from the path the user visually perceives.
When the user approaches a virtual object she can be redirected to a real proxy object that is registered to the virtual counterpart and provides passive haptic feedback. In such passive haptic environments, any number of virtual objects can be mapped to proxy objects having similar haptic properties, e.g., size, shape and texture. The user can sense a virtual object by touching its real world counterpart. Redirecting a user to a registered proxy object makes it necessary to predict the user's target location in the VE. Based on this prediction we determine a path through the physical space such that the user is guided to the registered proxy object. We present a taxonomy of possible redirection techniques that enable user guidance such that inconsistencies between visual and proprioceptive stimuli are imperceptible. We describe how a user's target in the virtual world can be predicted reliably and how a corresponding real-world path to the registered proxy object can be derived.}, note = {(acceptance rate 39%)}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBRH08, title = {The Holodeck Construction Manual}, author = { Frank Steinicke and Gerd Bruder and Timo Ropinski and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRH08.pdf}, year = {2008}, date = {2008-01-01}, booktitle = {Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH) (Conference DVD)}, abstract = {Immersive virtual reality (IVR) systems allow users to interact in virtual environments (VEs), but in these systems, e.g., six-wall CAVEs with outside-in optical tracking, presence is limited to the virtual world and the physical surrounding cannot be perceived. Real walking is the most intuitive way of moving through such a setup as well as through our real world. Unfortunately, typical IVEs have only limited interaction space in contrast to the potentially infinite VE. In recent years an enormous effort has been undertaken in order to allow omnidirectional walking of arbitrary distances in VEs. Appropriate hardware-based approaches are very costly and thus will probably not get beyond a prototype stage in the near future. We propose an alternative approach, which is motivated by the theory of perception. We exploit the fact that the human's visual sense may vary from the proprioceptive and vestibular senses without humans noticing a difference. Thus it becomes possible to direct the user on a physical path which may vary from the path perceived in the IVE. To exploit this limitation of the human sensory system we have extended redirected walking by introducing motion compression and gains, which scale the real distance a user walks, rotation compression and gains, which make the real turns smaller or larger, and curvature gains, which bend the user's walking direction such that s/he walks on a curve. Furthermore, we propose the concept of dynamic passive haptics which extends passive haptics in such a way that any number of virtual objects can be sensed by means of real proxy objects having similar haptic capabilities. Thus, dynamic passive haptics provide the user with the illusion of interacting with a desired virtual object by touching a corresponding proxy object. By exploiting these proposed concepts, finally the virtual holodeck construction manual can be written.
Such an IVE provides sufficient space to make the users walk arbitrarily and sense any objects in the VE by means of touching an associated proxy object.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @article{Soderholm2007lr, title = {The Potential Impact of 3D Telepresence Technology on Task Performance in Emergency Trauma Care}, author = {Hanna Söderholm and Diane Sonnenwald and Bruce Cairns and James Manning and Gregory Welch and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Soderholm2007lr.pdf}, year = {2007}, date = {2007-11-01}, journal = {Proceedings of the ACM Group 2007 Conference}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Fidopiastis2007, title = {[POSTER] Developing Baseline Assessments for Virtual Rehabilitation Environments}, author = {Cali Fidopiastis and Denise Nicholson and Charles Hughes and Eileen Smith}, year = {2007}, date = {2007-10-04}, booktitle = {4th INTUITION International Conference and Workshop 2007, Athens, Greece, October 4-5, 2007}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @article{Colbert2007, title = {Painting in High Dynamic Range}, author = {Mark Colbert and Erik Reinhard and Charles Hughes}, doi = {10.1016/j.jvcir.2007.03.002}, year = {2007}, date = {2007-10-01}, journal = {Journal of Visual Communication and Image Representation}, volume = {18}, number = {5}, pages = {387-396}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Clipp2007lr, title = {Structure from Motion via a Two-Stage Pipeline of Extended Kalman Filters}, author = {Brian Clipp and Greg Welch and Jan-Michael Frahm and Marc Pollefeys}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Clipp2007lr.pdf}, year = {2007}, date = {2007-09-01}, journal = {Proceedings of the British Machine Vision Conference (BMVC 2007)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Fidopiastis2007b, title = {[POSTER] Assessing Virtual Rehabilitation Design with Biophysiological Metrics}, author = {Cali M. Fidopiastis and Charles E. Hughes and Eileen. M. Smith and Denise. M. Nicholson}, doi = {10.1109/ICVR.2007.4362142}, issn = {2331-9542}, year = {2007}, date = {2007-09-01}, booktitle = {2007 Virtual Rehabilitation, Venice, Italy, September 27-29, 2007}, pages = {86-86}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inbook{Zhang2007, title = {Model-Guided Luminance Range Enhancement in Mixed Reality}, author = {Yunjun Zhang and Charles E. Hughes}, editor = {Mohamed Kamel and Aurélio Campilho}, doi = {10.1007/978-3-540-74260-9_103}, isbn = {978-3-540-74260-9}, year = {2007}, date = {2007-08-22}, booktitle = {Image Analysis and Recognition: 4th International Conference, ICIAR 2007, Montreal, Canada, August 22-24, 2007. 
Proceedings}, volume = {4633}, pages = {1160-1171}, publisher = {Springer Berlin Heidelberg}, series = {Lecture Notes in Computer Science }, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{Mordohai2007lr, title = {Real-Time Video-Based Reconstruction of Urban Environments}, author = {Philippos Mordohai and Jan-Michael Frahm and Amir Akbarzadeh and Brian Clipp and Chris Engels and David Gallup and Paul Merrell and Christina Salmi and Sudipta Sinha and Brad Talton and Liang Wang and Qing-Xiong Yang and Henrik Stewénius and Herman Towles and Greg Welch and Ruigang Yang and Marc Pollefeys and David Nistér}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Mordohai2007lr.pdf}, year = {2007}, date = {2007-07-01}, booktitle = {Proceedings of the ISPRS Working Group V/4 Workshop 3D-ARCH 2007: 3D Virtual Reconstruction and Visualization of Complex Architectures}, publisher = {GITC bv, PO Box112, 8530 AC Lemmer, The Netherlands}, address = {ETH Zurich, Switzerland}, organization = {ISPRS Commission V WG 4; NRC-CNRC National Research Council Canada; Institute of Geodesy and Photogrammetry, ETH Zurich, Switzerland; IRST - ITC Trento, Italy}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Yang2007, title = {Differential Camera Tracking through Linearizing the Local Appearance Manifold}, author = {Hua Yang and Marc Pollefeys and Greg Welch and Jan-Michael Frahm and Adrian Ilie}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2007.pdf}, doi = {10.1109/CVPR.2007.382978}, issn = {1063-6919}, year = {2007}, date = {2007-06-01}, booktitle = {2007 IEEE Conference on Computer Vision and Pattern Recognition}, pages = {1-8}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Micikevicius2007, title = {Visibility-based Forest Walk-through Using Inertial Level of Detail Model}, author = {Paulius Micikevicius and Charles Hughes}, doi = {10.1177/154851290700400202 }, year = {2007}, date = {2007-04-01}, journal = {Journal of Defense Modeling and Simulation }, volume = {4}, number = {2}, pages = {80-96}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Welch2007lr, title = {Complementary Tracking and Two-Handed Interaction for Remote 3D Medical Consultation with a PDA}, author = {Greg Welch and Michael Noland and Gary Bishop}, editor = {Gabriel Zachmann}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2007lr.pdf}, year = {2007}, date = {2007-03-01}, booktitle = {Proceedings of Trends and Issues in Tracking for Virtual Environments, Workshop at the IEEE Virtual Reality 2007 Conference}, publisher = {Shaker}, address = {Charlotte, NC USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Welch2007uq, title = {Measurement Sample Time Optimization for Human Motion Tracking/Capture Systems}, author = {Greg Welch and B. Danette Allen and Adrian Ilie and Gary Bishop}, editor = {Gabriel Zachmann}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2007uq.pdf}, year = {2007}, date = {2007-03-01}, booktitle = {Proc. IEEE VR 2007 Workshop on 'Trends and Issues in Tracking for Virtual Environments'}, publisher = {Shaker Verlag, Aachen, Germany}, address = {Charlotte, NC, USA}, organization = {IEEE}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Fiore2007, title = {Virtual experiments and environmental policy}, author = {Stephen M. Fiore and Glenn W. 
Harrison and Charles E. Hughes and E. Elisabet Rutström }, year = {2007}, date = {2007-02-01}, booktitle = {Frontiers of Environmental Economics, Washington, February 2007}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Dieker2007, title = {Virtual Classrooms: STAR Simulator}, author = {Lisa Dieker and Michael Hynes and Christopher Stapleton and Charles E. Hughes}, year = {2007}, date = {2007-02-01}, booktitle = {New Learning Technologies 2007, Orlando, FL, February 2007}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Fidopiastis2007b, title = {[POSTER] Human Experience Modeler; Context-Driven Cognitive Retraining to Facilitate Transfer of Learning}, author = {Cali M. Fidopiastis and Christopher Stapleton and J. D. Whiteside and Charles E. Hughes and Stephen M. Fiore and Glenn A. Martin and J. P. Rolland and Eileen. M. Smith}, year = {2007}, date = {2007-01-21}, booktitle = {International Brain Conference, January 19-21, 2007, Poster}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Konttinen2007, title = {[POSTER] Image-space Particle Emission}, author = { Jaakko Konttinen and Sumanta Pattanaik and Charles E. Hughes}, url = {http://doi.acm.org/10.1145/1280720.1280928}, doi = {10.1145/1280720.1280928}, isbn = {978-1-4503-1828-0}, year = {2007}, date = {2007-01-01}, booktitle = {ACM SIGGRAPH 2007 Posters}, publisher = {ACM}, address = {San Diego, California}, series = {SIGGRAPH '07}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SRBH07c, title = {3D Modeling and Design Supported via Interscopic Interaction Strategies}, author = { Frank Steinicke and Timo Ropinski and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SRBH07c.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of HCI International}, volume = {4553}, pages = {1160--1169}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {3D modeling applications are widely used in many application domains ranging from CAD to industrial or graphics design. Desktop environments have proven to be a powerful user interface for such tasks. However, the rising complexity of 3D datasets exceeds the possibilities provided by traditional devices or two-dimensional displays. Thus, more natural and intuitive interfaces are required. But in order to get the users' acceptance, technology-driven solutions that require inconvenient instrumentation, e.g., stereo glasses or tracked gloves, should be avoided. Autostereoscopic display environments in combination with 3D desktop devices enable users to experience virtual environments more immersively without annoying devices. In this paper we introduce interaction strategies with special consideration of the requirements of 3D modelers. We propose an interscopic display environment with implicated user interface strategies that allow displaying and interacting with both mono-, e.g., 2D elements, and stereoscopic content, which is beneficial for the 3D environment, which has to be manipulated.
These concepts are discussed with special consideration of the requirements of 3D modelers and designers.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBF07, title = {A Multimodal Locomotion User Interface for Immersive Geospatial Information Systems}, author = { Frank Steinicke and Gerd Bruder and Harald Frenz}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBF07.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of GI-Days}, pages = {289--293}, abstract = {In this paper we present a new multimodal locomotion user interface that enables users to travel through 3D environments displayed in geospatial information systems, e.g., Google Earth or Microsoft Virtual Earth. When using the proposed interface the geospatial data can be explored in immersive virtual environments (VEs) using stereoscopic visualization on a head-mounted display (HMD). When using certain tracking approaches the entire body can be tracked in order to support natural traveling by real walking. Moreover, intuitive devices are provided for both-handed interaction to complete the navigation process. We introduce the setup as well as corresponding interaction concepts.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{RSBH07, title = {Focus+Context Resolution Adaption for Autostereoscopic Displays}, author = { Timo Ropinski and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, editor = {Andreas Butz and Brian D. Fisher and Antonio Krüger and Patrick Olivier and Shigeru Owada}, year = {2007}, date = {2007-01-01}, booktitle = {Smart Graphics}, volume = {4569}, pages = {188--193}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SRBH07, title = {Interscopic User Interface Concepts for Fish Tank Virtual Reality Systems}, author = { Frank Steinicke and Timo Ropinski and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SRBH07.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of IEEE Virtual Reality (VR)}, pages = {27--34}, publisher = {IEEE Press}, abstract = {In this paper we introduce new user interface concepts for fish tank virtual reality (VR) systems based on autostereoscopic (AS) display technologies. Such AS displays allow viewing stereoscopic content without requiring special glasses. Unfortunately, until now simultaneous monoscopic and stereoscopic display was not possible. Hence prior work on fish tank VR systems focussed either on 2D or 3D interactions. In this paper we introduce so-called interscopic interaction concepts providing an improved working experience, which enable great potentials in terms of the interaction between 2D elements, which may be displayed either monoscopically or stereoscopically, e.g., GUI items, and the 3D virtual environment usually displayed stereoscopically. We present a framework which is based on a software layer between the operating system and its graphical user interface supporting the display of both mono- as well as stereoscopic content in arbitrary regions of an autostereoscopic display. The proposed concepts open up new vistas for the interaction in environments where essential parts of the GUI are displayed monoscopically and other parts are rendered stereoscopically.
We address some essential issues of such fish tank VR systems and introduce intuitive interaction concepts which we have realized.}, note = {(acceptance rate 20%)}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBH07, title = {[POSTER] Hybrid Traveling in Fully-Immersive Large-Scale Geographic Environments}, author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBH07.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality and Software Technology (VRST) (Poster Presentation)}, pages = {229--230}, abstract = {In this paper we present hybrid traveling concepts that enable users to navigate immersively through 3D geospatial environments displayed by applications such as Google Earth or Microsoft Virtual Earth. We propose a framework which allows integrating virtual reality (VR) based interaction devices and concepts into such applications that do not support VR technologies natively. In our proposed setup the content displayed by a geospatial application is visualized stereoscopically on a head-mounted display (HMD) for immersive exploration. The user's body can be tracked by using appropriate technologies in order to support natural traveling through the VE via a walking metaphor. Since the VE usually exceeds the dimension of the area in which the user can be tracked, we propose different strategies to map the user's movement into the virtual world. Moreover, intuitive devices and interaction techniques are presented for both-handed interaction to enrich the navigation process. In this paper we will describe the technical system setup as well as integrated interaction concepts and discuss scenarios based on existing geospatial visualization applications.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SBHR07, title = {Simultane 2D/3D User Interface Konzepte für Autostereoskopische Desktop-VR Systeme}, author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Timo Ropinski}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHR07.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)}, pages = {125--132}, publisher = {Shaker}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{SRBH07b, title = {Towards Applicable 3D User Interfaces for Everyday Working Environments}, author = { Frank Steinicke and Timo Ropinski and Gerd Bruder and Klaus H. Hinrichs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SRBH07b.pdf}, year = {2007}, date = {2007-01-01}, booktitle = {Proceedings of the International Conference on Human-Computer Interaction (INTERACT)}, volume = {4662}, pages = {546--559}, publisher = {Springer}, series = {Lecture Notes in Computer Science}, abstract = {Desktop environments have proven to be a powerful user interface and are used as the de facto standard human-computer interaction paradigm for over 40 years. However, there is a rising demand for 3D applications dealing with complex datasets, which exceeds the possibilities provided by traditional devices or two-dimensional displays. For these domains more immersive and intuitive interfaces are required. But in order to get the users' acceptance, technology-driven solutions that require inconvenient instrumentation, e.g., stereo glasses or tracked gloves, should be avoided.
Autostereoscopic display environments equipped with tracking systems enable users to experience 3D virtual environments more naturally without annoying devices, for instance via gestures. However, currently these approaches are only applied for specially designed or adapted applications without universal usability. In this paper we introduce new 3D user interface concepts for such setups where minimal instrumentation of the user is required such that the strategies can be easily integrated in everyday working environments. Therefore, we propose an interaction system and framework which allows displaying and interacting with both mono- as well as stereoscopic content simultaneously. The challenges for combined mouse-, keyboard- and gesture-based input paradigms in such an environment are pointed out and novel interaction strategies are introduced.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Sonnenwald2006uq, title = {Experimental Comparison of the Use of 2D and 3D Telepresence Technologies in Distributed Emergency Medical Situations}, author = {Diane Sonnenwald and Hanna Söderholm and Bruce Cairns and Eugene Freid and James Manning and Gregory Welch and Henry Fuchs}, editor = {A. Grove}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Sonnenwald2006uq.pdf}, year = {2006}, date = {2006-11-01}, booktitle = {Proceedings of the 69th Annual Meeting of the American Society of Information Science and Technology (ASIS&T 2006)}, volume = {43}, publisher = {American Society for Information Science and Technology}, address = {Washington, DC}, organization = {American Society of Information Science and Technology}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @inbook{Hughes2006, title = {The Evolution of a Framework for Mixed Reality Experiences}, author = {Charles Hughes and Christopher Stapleton and Matthew O'Connor}, editor = {Michael Haller and Mark Billinghurst and Bruce Thomas}, url = {http://www.igi-global.com/book/emerging-technologies-augmented-reality/338}, doi = {10.4018/978-1-59904-066-0.ch010}, isbn = {9781599040660}, year = {2006}, date = {2006-11-01}, urldate = {2016-12-05}, booktitle = {Emerging Technologies of Augmented Reality: Interfaces and Design}, pages = {198-217}, publisher = {IGI Global}, address = {Hershey, PA}, chapter = {10}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inbook} } @inbook{Stapleton2006, title = {Making Memories for a Lifetime}, author = {Christopher Stapleton and Charles Hughes}, editor = {Michael Haller and Mark Billinghurst and Bruce Thomas}, url = {http://www.igi-global.com/book/emerging-technologies-augmented-reality/338}, doi = {10.4018/978-1-59904-066-0.ch016}, isbn = {9781599040660}, year = {2006}, date = {2006-11-01}, urldate = {2016-12-05}, booktitle = {Emerging Technologies of Augmented Reality: Interfaces and Design}, pages = {329-351}, publisher = {IGI Global}, address = {Hershey, PA}, chapter = {16}, keywords = {A-ceh}, pubstate = {published}, tppubtype = {inbook} } @inproceedings{Szumlanski2006, title = {Conflict Resolution and a Framework for Collaborative Interactive Evolution}, author = {Sean R Szumlanski and Annie S. Wu and Charles E.
Hughes}, url = {http://dl.acm.org/citation.cfm?id=1597538.1597621}, isbn = {978-1-57735-281-5}, year = {2006}, date = {2006-07-16}, booktitle = {Proceedings of the 21st National Conference on Artificial Intelligence - Volume 1}, pages = {512-517}, publisher = {AAAI Press}, address = {Boston, Massachusetts}, series = {AAAI'06}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Welch2006c, title = {System and method for animating real objects with projected images }, author = {Greg Welch and Kok-Lim Low and Ramesh Raskar}, url = {https://patents.google.com/patent/US7068274B2/en?oq=US+7%2c068%2c274 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&p=1&u=%2Fnetahtml%2FPTO%2Fsearch-bool.html&r=1&f=G&l=50&co1=AND&d=PTXT&s1=7068274.PN.&OS=PN/7068274&RS=PN/7068274}, year = {2006}, date = {2006-06-27}, number = {US 7068274B2}, location = {US}, abstract = {A computer implemented method animates a 3D physical object by first acquiring a 3D graphics model of the object. The model is edited with graphics authoring tools to reflect a desired appearance of the object. The edited model is rendered as an image considering a user location and a location of a virtual light. Then, intensity values of the image are corrected according to an orientation of a surface of the object and a radiance at the surface. The 3D physical object can finally be illuminated with the corrected image to give the 3D physical object the desired appearance under the virtual light when viewed from the user location.}, note = {Filed: 2001-08-15}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Nister:2006fr, title = {Towards Urban 3D Reconstruction From Video}, author = {David Nistér and Marc Pollefeys and Amir Akbarzadeh and Jan-Michael Frahm and Philippos Mordohai and Ruigang Yang and Brian Clipp and Chris Engels and David Gallup and Paul Merrell and Michael Phelps and Sudipta Sinha and Brad Talton and Liang Wang and Qing-Xiong Yang and Henrik Stewénius and Greg Welch and Herman Towles}, year = {2006}, date = {2006-06-01}, booktitle = {Proceedings of the Third International Symposium on 3D Data Processing, Visualization and Transmission (3DPVT 2006)}, address = {Chapel Hill, NC}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Akbarzadeh2006fr, title = {Towards Urban 3D Reconstruction From Video}, author = {Amir Akbarzadeh and Jan-Michael Frahm and Philippos Mordohai and Brian Clipp and Chris Engels and David Gallup and Paul Merrell and Michael Phelps and Sudipta Sinha and Brad Talton and Liang Wang and Qing-Xiong Yang and Henrik Stewénius and Ruigang Yang and Greg Welch and Herman Towles and David Nistér and Marc Pollefeys}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Akbarzadeh2006.pdf}, year = {2006}, date = {2006-06-01}, booktitle = {Proceedings of the Third International Symposium on 3D Data Processing, Visualization and Transmission (3DPVT 2006)}, address = {Chapel Hill, NC}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Yang2006_3DPVT, title = {Illumination Insensitive Model-Based 3D Object Tracking and Texture Refinement}, author = {Hua Yang and Greg Welch and Marc Pollefeys}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2006_3DPVT.pdf}, doi = {10.1109/3DPVT.2006.79}, year = {2006}, date = {2006-06-01}, booktitle = {3D Data Processing, Visualization, and Transmission, Third International Symposium on}, pages = {869-876}, 
keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Yang2006c, title = {Illumination Insensitive Model-Based 3D Object Tracking and Texture Refinement}, author = {Hua Yang and Greg Welch}, year = {2006}, date = {2006-06-01}, booktitle = {3D Data Processing, Visualization, and Transmission, Third International Symposium on}, address = {Chapel Hill, NC USA}, organization = {The University of North Carolina at Chapel Hill}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @inproceedings{yang2006, title = {Stereovision on GPU}, author = { Ruigang Yang and Liang Wang and Greg Welch and Marc Pollefeys}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2006.pdf}, year = {2006}, date = {2006-05-23}, booktitle = {2006 Workshop on Edge Computing Using New Commodity Architectures}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Welch2006, title = {GPU‐Based View Synthesis Using an Orbital Reconstruction Frustum}, author = {Greg Welch and Hua Yang and Andrei State and Vincent Noel and Adrian Ilie and Ruigang Yang and Marc Pollefeys and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2006.pdf}, year = {2006}, date = {2006-05-23}, booktitle = {2006 Workshop on Edge Computing Using New Commodity Architectures}, address = {Chapel Hill, NC, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @conference{YangR2006, title = {[POSTER] Stereovision on GPU}, author = {Ruigang Yang and Liang Wang and Greg Welch and Marc Pollefeys}, year = {2006}, date = {2006-05-23}, booktitle = {2006 Workshop on Edge Computing Using New Commodity Architectures (EDGE 2006)}, address = {Chapel Hill, NC, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @conference{welch2006c, title = {[POSTER] GPU‐Based View Synthesis Using an Orbital Reconstruction Frustum}, author = {Greg Welch and Hua Yang and Andrei State and Vincent Noel and Adrian Ilie and Ruigang Yang and Marc Pollefeys and Henry Fuchs}, year = {2006}, date = {2006-05-23}, booktitle = {2006 Workshop on Edge Computing Using New Commodity Architectures (EDGE 2006)}, address = {Chapel Hill, NC, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @article{Fidopiastis2006, title = {Human Experience Modeler: Context-Driven Cognitive Retraining to Facilitate Transfer of Learning}, author = {Cali M. Fidopiastis and Christopher Stapleton and J. D. Whiteside and Charles Hughes and Stephen M. Fiore and Glenn A. Martin and J. P. 
Rolland and Eileen Smith }, doi = {10.1089/cpb.2006.9.183}, year = {2006}, date = {2006-04-01}, journal = {CyberPsychology and Behavior }, volume = {9}, number = {2}, pages = {183-187}, publisher = {Mary Ann Liebert, Inc.}, address = {New Rochelle, NY}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{State2006, title = {An Interactive Camera Placement and Visibility Simulator for Image-Based VR Applications}, author = {Andrei State and Greg Welch and Adrian Ilie}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/State2006-red.pdf}, year = {2006}, date = {2006-01-01}, booktitle = {Proceedings of the Engineering Reality of Virtual Reality 2006}, address = {San Jose, CA}, organization = {IS&T/SPIE 18th Annual Symposium on Electronic Imaging Science and Technology}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @conference{State2006ah, title = {An Interactive Camera Placement and Visibility Simulator for Image-Based VR Applications}, author = {Andrei State and Greg Welch and Adrian Ilie}, url = {https://sreal.ucf.edu/wp-content/uploads/2018/02/State2006_Pandora_red.pdf}, year = {2006}, date = {2006-01-01}, booktitle = {Proceedings of The Engineering Reality of Virtual Reality 2006 (3D Imaging, Interaction, and Measurement; IS&T/SPIE 18th Annual Symposium on Electronic Imaging Science and Technology)}, address = {San Jose, CA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @article{Welch2006, title = {Remote 3D Medical Consultation}, author = {Gregory Welch and Henry Fuchs and Bruce Cairns and Ketan Mayer-Patel and Diane Sonnenwald and Ruigang Yang and Andrei State and Herman Towles and Adrian Ilie and Manoj Ampalam and Srinivas Krishnan and Hanna Söderholm and Vincent Noel and Michael Noland}, year = {2006}, date = {2006-01-01}, journal = {Journal of Mobile Multimedia (JMM)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Stapleton2016, title = {Believing is Seeing: Cultivating Radical Media Innovations}, author = {Christopher Stapleton and Charles Hughes }, doi = {10.1109/MCG.2006.12 }, isbn = {0272-1716}, year = {2006}, date = {2006-01-01}, journal = {IEEE Computer Graphics and Applications}, volume = {26}, number = {1}, pages = {88-93}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{SRHB06, title = {A Multiple View System for Modeling Building Entities}, author = { Frank Steinicke and Timo Ropinski and Klaus H. Hinrichs and Gerd Bruder}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SRHB06.pdf}, year = {2006}, date = {2006-01-01}, booktitle = {Proceedings of the International Conference on Coordinated & Multiple Views in Exploratory Visualization}, pages = {69--78}, publisher = {IEEE Press}, abstract = {Modeling virtual buildings is an essential task in the city planning domain, in which several aspects have an essential influence. Planners have to deal with different types of potentially multiform datasets; moreover, they have to consider certain guidelines and constraints that are imposed on the development areas to which buildings are related. The planning process can be divided into different subtasks with varying requirements regarding the interaction techniques that are used for their accomplishment. To incorporate these aspects, multiple view systems have shown enormous potential for providing efficient user interfaces.
In this paper, we present strategies for modeling virtual building entities via a multiple view system as part of a 3D decision support system that enables the intuitive generation and evaluation of building proposals. City planners have been involved in the design process of the system, in particular the multiple view concepts. Therefore each view of the system, which visualizes different aspects concerning the underlying models, meets the demands of city planners. Furthermore, we present both coupled and uncoupled interaction techniques between different views with respect to requirements of the city planning domain.}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{RSBH06, title = {Simultaneously Viewing Monoscopic and Stereoscopic Content on Vertical-Interlaced Autostereoscopic Displays}, author = { Timo Ropinski and Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs}, year = {2006}, date = {2006-01-01}, booktitle = {Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH) (Conference DVD)}, publisher = {ACM Press}, keywords = {A-gb}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Welch2005ix, title = {Improving, Expanding and Extending 3D Telepresence}, author = {Gregory Welch and Henry Fuchs and Bruce Cairns and Ketan Mayer-Patel and Diane Sonnenwald and Ruigang Yang and Andrei State and Herman Towles and Adrian Ilie and Michael Noland and Vincent Noel and Hua Yang}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2005ix_red.pdf}, year = {2005}, date = {2005-12-01}, booktitle = {Proceedings of the 2005 International Workshop on Advanced Information Processing for Ubiquitous Networks}, address = {Christchurch, New Zealand}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Xu2005, title = {HDR Still Image Encoding in JPEG 2000}, author = {Ruifeng Xu and Sumanta Pattanaik and Charles Hughes}, doi = {10.1109/MCG.2005.133 }, year = {2005}, date = {2005-11-07}, journal = {IEEE Computer Graphics and Applications }, volume = {25}, number = {6}, pages = {57-64}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @article{Hughes2005, title = {Mixed Reality in Education, Entertainment and Training: An Interdisciplinary Approach}, author = {Charles Hughes and Christopher Stapleton and Darin E. 
Hughes and Eileen Smith }, doi = {10.1109/MCG.2005.139 }, year = {2005}, date = {2005-11-07}, journal = {IEEE Computer Graphics and Applications }, volume = {25}, number = {6}, pages = {24-30}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Yang2005kk, title = {Model-Based 3D Object Tracking Using an Extended-Extended Kalman Filter and Graphics Rendered Measurements}, author = {Hua Yang and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2005kk.pdf}, year = {2005}, date = {2005-11-01}, booktitle = {Proceedings of 1st Computer Vision for Interactive and Intelligent Environments (CV4IIE) Workshop}, address = {Lexington, KY}, organization = {University of Kentucky}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Ilie2005jp, title = {Ensuring Color Consistency across Multiple Cameras}, author = {Adrian Ilie and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Ilie2005jp.pdf}, year = {2005}, date = {2005-10-01}, booktitle = {Proceedings of International Conference on Computer Vision (ICCV)}, volume = {2}, pages = {1268--1275}, address = {Beijing, China}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @conference{Welch2005et, title = {Remote 3D Medical Consultation}, author = {Greg Welch and Diane Sonnenwald and Ketan Mayer-Patel and Ruigang Yang and Andrei State and Herman Towles and Bruce Cairns and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2005et_red.pdf}, year = {2005}, date = {2005-10-01}, booktitle = {Proceedings of BROADNETS: 2nd IEEE/CreateNet International Conference on Broadband Networks}, pages = {103--110}, publisher = {Omnipress}, address = {Boston, MA, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {conference} } @inproceedings{Fidopiastis2005, title = {Human Experience Modeler: Context Driven Cognitive Retraining and Narrative Threads}, author = {Cali M. Fidopiastis and Christopher Stapleton and J. D. Whiteside and Charles E. Hughes and Stephen M. Fiore and Glenn A. Martin and J. P. Rolland and Eileen M. Smith}, year = {2005}, date = {2005-09-19}, booktitle = {Proceedings of the 4th International Workshop on Virtual Rehabilitation (IWVR2005), Catalina Island, CA, September 19-21}, pages = {120-134}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Raskar2005, title = {System and method for registering multiple images with three-dimensional objects }, author = {Ramesh Raskar and Gregory F. Welch and Kok-Lim Low}, url = {https://patents.google.com/patent/US6930681B2/en?oq=US+6%2c930%2c681 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO2&Sect2=HITOFF&p=1&u=%2Fnetahtml%2FPTO%2Fsearch-bool.html&r=1&f=G&l=50&co1=AND&d=PTXT&s1=6,930,681.PN.&OS=PN/6,930,681&RS=PN/6930681}, year = {2005}, date = {2005-08-16}, number = {US 6930681B2}, location = {US}, abstract = {A computer implemented method registers an image with a 3D physical object by first acquiring a 3D graphics model of an object. Multiple 3D calibration points on a surface of the object and corresponding 3D model calibration points in the 3D graphics model are identified. The object is illuminated with a calibration image using a projector at a fixed location.
The calibration image is aligned with each of the 3D calibration points on the surface of the 3D physical object to identify corresponding 2D pixels in the calibration image, and then a transformation between the 2D calibration pixels and the corresponding 3D model calibration points is determined to register the projector with the 3D physical object.}, note = {Filed: 2001-08-14}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {patent} } @inproceedings{Hughes2005b, title = {The Shared Imagination: Creative Collaboration in Augmented Virtuality}, author = {Charles Hughes and Christopher Stapleton}, year = {2005}, date = {2005-07-22}, booktitle = {Proceedings of HCI International 2005, Las Vegas, NV, July 22-27}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Liu2005, title = {Deducing Behaviors from Primitive Movement Attributes}, author = {Danzhou Liu and Charles E. Hughes}, url = {http://dx.doi.org/10.1117/12.604149}, doi = {10.1117/12.604149}, year = {2005}, date = {2005-04-05}, booktitle = {Proceedings of SPIE Defense and Security Symposium, Orlando, FL, March 28 – April 1}, issuetitle = {Data Mining, Intrusion Detection, Information Assurance, and Data Networks Security 2005}, volume = {5812}, pages = {180-189}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Stapleton2005, title = {Mixed Reality and Experiential Movie Trailers: Combining Emotions and Immersion to Innovate Entertainment Marketing}, author = {Christopher Stapleton and Charles E. Hughes}, year = {2005}, date = {2005-01-30}, booktitle = {Proceedings of 2005 International Conference on Human-Computer Interface Advances in Modeling and Simulation (SIMCHI’05), New Orleans, January 23-27}, pages = {40-48}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{O'Connor2005, title = {Authoring and Delivering Mixed Reality Experiences}, author = {Matthew O'Connor and Charles E. Hughes}, year = {2005}, date = {2005-01-23}, booktitle = {Proceedings of 2005 International Conference on Human-Computer Interface Advances in Modeling and Simulation (SIMCHI’05), New Orleans, January 23-27}, pages = {33-39}, series = {SIMCHI’05}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Hughes2005c, title = {Designing a System for Effective Use of Immersive Audio in Mixed Reality}, author = {Darin E. Hughes and Scott Vogelpohl and Charles E. Hughes}, year = {2005}, date = {2005-01-23}, booktitle = {Proceedings of 2005 International Conference on Human-Computer Interface Advances in Modeling and Simulation (SIMCHI’05), New Orleans, January 23-27}, pages = {51-57}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Allen2005, title = {A general method for comparing the expected performance of tracking and motion capture systems}, author = {B.
Danette Allen and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Allen2005.pdf}, doi = {http://doi.acm.org/10.1145/1101616.1101658}, isbn = {1-59593-098-1}, year = {2005}, date = {2005-01-01}, booktitle = {VRST '05: Proceedings of the ACM symposium on Virtual reality software and technology}, pages = {201--210}, publisher = {ACM Press}, address = {Monterey, CA, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Welch2005a, title = {Immersive Electronic Books for Surgical Training}, author = {Greg Welch and Ruigang Yang and Sascha Becker and Adrian Ilie and Dan Russo and Jesse Funaro and Andrei State and Kok-Lim Low and Anselmo Lastra and Herman Towles and Bruce Cairns and Henry Fuchs and Andy van Dam}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2005a.pdf}, year = {2005}, date = {2005-01-01}, journal = {IEEE Multimedia}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @incollection{Welch2005aa, title = {3D Telepresence for Off-Line Surgical Training and On-Line Remote Consultation}, author = {Greg Welch and Ruigang Yang and Bruce Cairns and Herman Towles and Andrei State and Adrian Ilie and Sascha Becker and Dan Russo and Jesse Funaro and Diane Sonnenwald and Ketan Mayer-Patel and B. Danette Allen and Hua Yang and Eugene Freid and Andy van Dam and Henry Fuchs}, editor = {Susumu Tachi}, year = {2005}, date = {2005-01-01}, booktitle = {Telecommunication, Teleimmersion and Telexistence II}, pages = {113--152}, publisher = {IOS Press (English) and Ohmsha (Japanese)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @incollection{Adabala2005, title = {Gridless Controllable Fire}, author = {Neeharika Adabala and Charles Hughes}, editor = {Kim Pallister}, isbn = {1-58450-352-1}, year = {2005}, date = {2005-01-01}, booktitle = {Game Programming Gems 5}, pages = {539-549}, publisher = {Charles River Media}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {incollection} } @article{Konttinen2005, title = {The Future of Mixed Reality: Issues in Illumination and Shadows}, author = {Jaakko Konttinen and Charles Hughes and Sumanta Pattanaik}, doi = {10.1177/154851290500200104 }, year = {2005}, date = {2005-01-01}, journal = {Journal of Defense Modeling and Simulation }, volume = {2}, number = {1}, pages = {29-37}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {article} } @inproceedings{Stapleton:2005, title = {The Art of Nurturing Citizen Scientists Through Mixed Reality}, author = { Christopher Stapleton and Eileen Smith and Charles E. Hughes}, url = {http://dx.doi.org/10.1109/ISMAR.2005.58}, doi = {10.1109/ISMAR.2005.58}, isbn = {0-7695-2459-1}, year = {2005}, date = {2005-01-01}, booktitle = {Proceedings of the 4th IEEE/ACM International Symposium on Mixed and Augmented Reality}, pages = {2--11}, publisher = {IEEE Computer Society}, address = {Washington, DC, USA}, series = {ISMAR '05}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Szumlanski:2005, title = {[POSTER] Collaborative Interactive Evolution}, author = { Sean R. Szumlanski and Annie S. Wu and Charles E.
Hughes}, url = {http://doi.acm.org/10.1145/1068009.1068373}, doi = {10.1145/1068009.1068373}, isbn = {1-59593-010-8}, year = {2005}, date = {2005-01-01}, booktitle = {Proceedings of the 7th Annual Conference on Genetic and Evolutionary Computation}, pages = {2199--2200}, publisher = {ACM}, address = {Washington DC, USA}, series = {GECCO '05}, keywords = {A-ceh, SREAL}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Welch2004b, title = {3D Telepresence for Off-Line Surgical Training and On-Line Remote Consultation}, author = {Greg Welch and Ruigang Yang and Bruce Cairns and Herman Towles and Andrei State and Adrian Ilie and Sascha Becker and Dan Russo and Jesse Funaro and Diane Sonnenwald and Ketan Mayer-Patel and B. Danette Allen and Hua Yang and Eugene Freid and Andy van Dam and Henry Fuchs}, editor = {Susumu Tachi}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2004b_CREST.pdf}, year = {2004}, date = {2004-12-01}, booktitle = {Proceedings of ICAT CREST Symposium on Telecommunication, Teleimmersion, and Telexistence}, address = {The University of Tokyo, Tokyo, Japan}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Ilie2004, title = {Combining Head-Mounted and Projector-Based Displays for Surgical Training}, author = {Adrian Ilie and Kok-Lim Low and Greg Welch and Anselmo Lastra and Henry Fuchs and Bruce Cairns}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Ilie2004_Hybrid.pdf}, year = {2004}, date = {2004-04-01}, journal = {Presence: Teleoperators and Virtual Environments}, volume = {13}, number = {2}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @patent{Raskar2004, title = {Method for cross-fading intensities of multiple images of a scene for seamless reconstruction }, author = {Ramesh Raskar and Gregory F. Welch and Kok-Lim Low}, url = {https://patents.google.com/patent/US6677956B2/en?oq=US+6%2c677%2c956 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=6,677,956.PN.&OS=PN/6,677,956&RS=PN/6,677,956}, year = {2004}, date = {2004-01-13}, number = {US 6677956B2}, location = {US}, abstract = {A computer implemented method cross-fades intensities of a plurality of overlapping images by identifying pixels in a target image that are only produced by a first source image. The weights of all the corresponding pixels in the first source image are set to one. Pixels in a second source image contributing to the target image are similarly identified and set to one. The weight of each remaining pixel in the first and second images is inversely proportional to a distance to a nearest pixel having a weight of one. Then, the first and second source images can be projected to form the target image.}, note = {Filed: 2001-08-15}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {patent} } @article{Yang2004, title = {A Unified Approach to Real-Time, Multi-Resolution, Multi-Baseline 2D View Synthesis and 3D Depth Estimation using Commodity Graphics Hardware}, author = {Ruigang Yang and Marc Pollefeys and Hua Yang and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2004.pdf}, year = {2004}, date = {2004-01-01}, journal = {International Journal of Image and Graphics (IJIG)}, volume = {4}, number = {4}, pages = {1--25}, abstract = {We present a new method for using commodity graphics hardware to achieve real-time, on-line, 2D view synthesis or 3D depth estimation from two or more calibrated cameras.
Our method combines a 3D plane-sweeping approach with 2D multi-resolution color consistency tests. We project camera imagery onto each plane, compute measures of color consistency throughout the plane at multiple resolutions, and then choose the color or depth (corresponding plane) that is most consistent. The key to achieving real-time performance is our use of the advanced features included with recent commodity computer graphics hardware to implement the computations simultaneously (in parallel) across all reference image pixels on a plane. Our method is relatively simple to implement, and flexible in terms of the number and placement of cameras. With two cameras and an NVIDIA GeForce4 graphics card we can achieve 50M disparity evaluations per second, including image download and read-back overhead. This performance matches the fastest available commercial software-only implementation of correlation-based stereo algorithms, while freeing up the CPU for other uses.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Low2003, title = {Combining Head-Mounted and Projector-Based Displays for Surgical Training}, author = {Kok-Lim Low and Adrian Ilie and Greg Welch and Anselmo Lastra}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Low2003.pdf}, isbn = {0-7695-1882-6}, year = {2003}, date = {2003-01-01}, booktitle = {Proceedings of the IEEE Virtual Reality 2003}, pages = {110}, publisher = {IEEE Computer Society}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Yang2003c, title = {Dealing with Textureless Regions and Specular Highlights---A Progressive Space Carving Scheme Using a Novel Photo-consistency Measure}, author = {Ruigang Yang and Marc Pollefeys and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2003c.pdf}, year = {2003}, date = {2003-01-01}, booktitle = {Proceedings of the 9th International Conference on Computer Vision}, pages = {576--584}, publisher = {IEEE Computer Society}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Yang2003ip, title = {Real-Time Consensus-Based Scene Reconstruction using Commodity Graphics Hardware}, author = {Ruigang Yang and Greg Welch}, year = {2003}, date = {2003-01-01}, journal = {Computer Graphics Forum (invited submission)}, volume = {22}, number = {2}, pages = {207--216}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @misc{Cairns2003, title = {Three Dimensional (3D) Acquisition and Display of Reality: The Potential for a `Holodeck' in Trauma Surgery}, author = {Bruce A.
Cairns and Greg Welch and Adrian Ilie and Ruigang Yang and Kok‐Lim Low and Anselmo Lastra and Henry Fuchs and Anthony Meyer}, year = {2003}, date = {2003-01-01}, institution = {The American Association for the Surgery of Trauma 2003}, note = {Presented at The American Association for the Surgery of Trauma 2003 Annual Meeting}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {misc} } @inproceedings{vandam2002, title = {Immersive Electronic Books for Teaching Surgical Procedures}, author = {Andy van Dam and Henry Fuchs and Sascha Becker and Loring Holden and Adrian Ilie and Kok-Lim Low and Anne Morgan Spalter and Ruigang Yang and Gregory Welch}, editor = {Susumu Tachi}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/VanDam2002.pdf}, year = {2002}, date = {2002-12-01}, booktitle = {Proceedings of ICAT CREST Symposium on Telecommunication, Teleimmersion, and Telexistence}, address = {The University of Tokyo, Tokyo, Japan}, abstract = {This paper reports on early progress with the use of immersive virtual reality technology for trauma surgery training. We discuss our technical goals and the application area, and then describe our work to date. The need to create a system that can be used by overworked, highly time-constrained surgeons and surgical trainees has affected many of our decisions, from the type of displays used to the focus on time navigation (to let trainees experience important moments and skip well-understood ones) to the use of easily learned traditional 2D interfaces (vs. more demanding innovative 3D interfaces) for some of the interaction methods. This three-year research project, which is just entering its second year, is supported by a National Science Foundation Information Technology Research grant for collaborative research between groups at Brown University and the University of North Carolina at Chapel Hill.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Yang2002a, title = {Real-Time Consensus-Based Scene Reconstruction using Commodity Graphics Hardware}, author = {Ruigang Yang and Greg Welch and Gary Bishop}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2002a.pdf}, year = {2002}, date = {2002-10-01}, booktitle = {Proceedings of Pacific Graphics 2002}, pages = {225--234}, address = {Tsinghua University, Beijing, China}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Welch2002, title = {Motion Tracking: No Silver Bullet, but a Respectable Arsenal}, author = {Greg Welch and Eric Foxlin}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2002.pdf}, doi = {10.1109/MCG.2002.1046626}, issn = {0272-1716}, year = {2002}, date = {2002-01-01}, journal = {IEEE Computer Graphics and Applications}, volume = {22}, number = {6}, pages = {24--38}, publisher = {IEEE Computer Society Press}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @inproceedings{Yang2002ab, title = {Real-time View Synthesis Using Commodity Graphics Hardware}, author = {Ruigang Yang and Greg Welch and Gary Bishop and Herman Towles}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2002ab.pdf}, doi = {10.1145/1242073.1242253}, isbn = {1-58113-525-4}, year = {2002}, date = {2002-01-01}, booktitle = {ACM SIGGRAPH 2002 Conference Abstracts and Applications}, pages = {240--240}, publisher = {ACM}, address = {San Antonio, Texas}, series = {SIGGRAPH '02}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Yang2002b, title = {Fast image
segmentation and smoothing using commodity graphics hardware}, author = {Ruigang Yang and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2002b.pdf}, issn = {1086-7651}, year = {2002}, date = {2002-01-01}, journal = {J. Graph. Tools}, volume = {7}, number = {4}, pages = {91--100}, publisher = {A. K. Peters, Ltd.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @article{Superfine2002, title = {Touching in Biological Systems: A 3D Force Microscope}, author = { Richard Superfine and Gary Bishop and Jeremy Cummings and Jay Fisher and Kurtis Keller and Gerald Matthews and D. Sill and Russell M. Taylor and Leandra Vicci and Chris Weigle and Greg Welch and Benjamin Wilde}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Superfine2002_abstract.pdf}, year = {2002}, date = {2002-01-01}, journal = {Microscopy and Microanalysis}, volume = {8}, number = {S02}, pages = {174--175}, publisher = {Cambridge Univ Press}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @misc{Bishop2001aa, title = {Course 11---Tracking: Beyond 15 Minutes of Thought}, author = {Gary Bishop and Greg Welch and B. Danette Allen}, url = {http://www.cs.unc.edu/~tracker/ref/s2001/tracker/}, year = {2001}, date = {2001-08-01}, howpublished = {http://www.cs.unc.edu/~tracker/ref/s2001/tracker/}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {misc} } @incollection{Allen2001, title = {Tracking: Beyond 15 Minutes of Thought: SIGGRAPH 2001 Course 11}, author = {B. Danette Allen and Gary Bishop and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Allen2001.pdf http://www.cs.unc.edu/~tracker/ref/s2001/tracker/}, year = {2001}, date = {2001-01-01}, booktitle = {Computer Graphics}, publisher = {ACM Press, Addison-Wesley}, address = {Los Angeles, CA, USA (August 12-17)}, edition = {SIGGRAPH 2001 Course Pack}, series = {Annual Conference on Computer Graphics & Interactive Techniques}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @incollection{Low2001, title = {Life-Sized Projector-Based Dioramas}, author = {Kok-Lim Low and Greg Welch and Anselmo Lastra and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Low2001.pdf}, year = {2001}, date = {2001-01-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology}, pages = {8}, publisher = {ACM SIGGRAPH, Addison-Wesley}, address = {Banff Centre, Banff, Alberta, Canada (November 15-17, 2001)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @inproceedings{Majumder2001, title = {COMPUTER GRAPHICS OPTIQUE: Optical Superposition of Projected Computer Graphics}, author = {Aditi Majumder and Greg Welch }, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Majumder2001-red.pdf}, year = {2001}, date = {2001-01-01}, booktitle = {Fifth Immersive Projection Technology Workshop, in conjunction with the Seventh Eurographics Workshop on Virtual Environments}, publisher = {Springer-Verlag}, address = {Stuttgart, Germany}, abstract = {We present some ideas and demonstrations for a hybrid projector-based rendering and display technique we call Computer Graphics Optique. Instead of partially overlapping projected images to achieve a wide-area display, we completely overlap projected images on top of each other to achieve the addition of light and color in an optical composition buffer.
The idea is to use the optical composition to replace some analytical computation, to increase rendering speed, gain flexibility, intensity range, and intensity resolution. Where projector-based displays are appropriate, potential uses include the optical realization of certain effects normally requiring a digital accumulation buffer, the optical composition of heterogeneous lighting techniques, and the ability to use heterogeneous graphics engines, in parallel. In addition, one can make use of the optical projector control of focus augmented with the optical superposition to achieve effects that are otherwise computationally expensive. We believe that this technique offers the possibility of a new paradigm for combined rendering and projector-based display.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @incollection{Raskar2001, title = {Shader Lamps: Animating Real Objects With Image-Based Illumination}, author = {Ramesh Raskar and Greg Welch and Kok-Lim Low and Deepak Bandyopadhyay}, editor = {S. J. Gortler and K. Myszkowski}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar2001.pdf}, year = {2001}, date = {2001-01-01}, booktitle = {Rendering Techniques 2001, Proceedings of the Eurographics Workshop in London, United Kingdom}, pages = {89-102}, publisher = {Springer, New York}, address = {University College London (UCL), London, England}, abstract = {We describe a new paradigm for three-dimensional computer graphics, using projectors to graphically animate physical objects in the real world. The idea is to replace a physical object (with its inherent color, texture, and material properties) with a neutral object and projected imagery, reproducing the original (or alternative) appearance directly on the object. Because the approach is to effectively lift the visual properties of the object into the projector, we call the projectors shader lamps. We address the central issue of complete and continuous illumination of non-trivial physical objects using multiple projectors and present a set of new techniques that makes the process of illumination practical.
We demonstrate the viability of these techniques through a variety of table-top applications, and describe preliminary results to reproduce life-sized virtual spaces.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @incollection{Welch2001, title = {An Introduction to the Kalman filter: SIGGRAPH 2001 Course 8}, author = {Greg Welch and Gary Bishop}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2001.pdf}, year = {2001}, date = {2001-01-01}, booktitle = {Computer Graphics}, publisher = {ACM Press, Addison-Wesley}, address = {Los Angeles, CA, USA (August 12-17)}, edition = {SIGGRAPH 2001 Course Pack}, series = {Annual Conference on Computer Graphics & Interactive Techniques}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @article{Welch2001b, title = {High-Performance Wide-Area Optical Tracking: The HiBall Tracking System}, author = {Greg Welch and Gary Bishop and Leandra Vicci and Stephen Brumback and Kurtis Keller and D'nardo Colucci}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2001b.pdf}, year = {2001}, date = {2001-01-01}, journal = {Presence: Teleoperators and Virtual Environments}, volume = {10}, number = {1}, pages = {1-21}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @misc{Welch2001c, title = {The Design and Evaluation of Multi-Sensor Tracking Systems: A Mathematical Framework and Graphical Tools (ITR Small Proposal #0112851)}, author = {Greg Welch and Gary Bishop}, year = {2001}, date = {2001-01-01}, publisher = {NSF}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {misc} } @inproceedings{Yang2001, title = {Automatic and Continuous Projector Display Surface Estimation Using Every-Day Imagery}, author = {Ruigang Yang and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Yang2001.pdf}, year = {2001}, date = {2001-01-01}, booktitle = {Proceedings of 9th International Conference in Central Europe on Computer Graphics, Visualization and Computer Vision 2001}, address = {Plzen, Czech Republic}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Bishop2000, title = {Working in the Office of 'Real Soon Now'}, author = {Gary Bishop and Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Bishop2000.pdf}, year = {2000}, date = {2000-07-01}, journal = {IEEE Computer Graphics and Applications}, volume = {20}, number = {4}, pages = {76-78}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @techreport{Raskar2000, title = {Shader Lamps: Animating Real Objects with Image-Based Illumination}, author = {Ramesh Raskar and Greg Welch and Kok-Lim Low}, year = {2000}, date = {2000-06-06}, number = {00-027}, institution = {University of North Carolina at Chapel Hill}, type = {techreport}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {techreport} } @incollection{Chen2000, title = {Toward a Compelling Sensation of Telepresence: Demonstrating a Portal to a Distant (Static) Office}, author = {Wei-Chao Chen and Herman Towles and Lars Nyland and Greg Welch and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Chen2000.pdf}, year = {2000}, date = {2000-01-01}, booktitle = {Proceedings of IEEE Visualization 2000}, publisher = {IEEE Computer Society Press}, address = {Salt Lake City, UT, USA (October 8 - 13)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @inproceedings{Majumder2000, title = {Achieving color uniformity across multi-projector displays},
author = {Aditi Majumder and Zhu He and Herman Towles and Greg Welch}, url = {http://portal.acm.org/citation.cfm?id=375213.375227 https://sreal.ucf.edu/wp-content/uploads/2017/02/Majumder2000.pdf}, isbn = {1-58113-309-X}, year = {2000}, date = {2000-01-01}, booktitle = {Proceedings of the conference on Visualization '00}, pages = {117--124}, publisher = {IEEE Computer Society Press}, address = {Salt Lake City, Utah, United States}, series = {VIS '00}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @article{Welch2000a, title = {Projected Imagery In Your Office in the Future}, author = {Greg Welch and Henry Fuchs and Ramesh Raskar and Michael Brown and Herman Towles}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch2000a.pdf}, year = {2000}, date = {2000-01-01}, journal = {IEEE Computer Graphics and Applications}, volume = {20}, number = {4}, pages = {62-67}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @techreport{YangR2000, title = {Automatic Display Surface Estimation using Everyday Imagery}, author = {Ruigang Yang and Greg Welch}, year = {2000}, date = {2000-01-01}, number = {TR00-015}, institution = {University of North Carolina at Chapel Hill, Department of Computer Science}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {techreport} } @inproceedings{Welch1999, title = {The HiBall Tracker: High-Performance Wide-Area Tracking for Virtual and Augmented Environments}, author = {Greg Welch and Gary Bishop and Leandra Vicci and Stephen Brumback and Kurtis Keller and D'nardo Colucci}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1999.pdf}, doi = {10.1145/323663.323664 }, year = {1999}, date = {1999-12-01}, booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)}, pages = {1--11}, publisher = {ACM Press, Addison-Wesley Publishing Company}, organization = {Association for Computing Machinery}, note = {Best Paper designation.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @patent{Fuchs1999b, title = {Dynamic generation of imperceptible structured light for tracking and acquisition of three dimensional scene geometry and surface characteristics in interactive three dimensional computer graphics applications}, author = {Henry Fuchs and Mark Livingston and Gary Bishop and Gregory Francis Welch}, url = {https://patents.google.com/patent/US5870136A/en?oq=US+5%2c870%2c136 http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL&p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=5,870,136.PN.&OS=PN/5,870,136&RS=PN/5,870,136}, year = {1999}, date = {1999-02-09}, number = {US 5870136A}, location = {US}, abstract = {Methods, systems and computer products are provided for tracking objects within a scene using imperceptible structured light. The imperceptible structured light may be used in environments where humans work and therefore avoids disorienting observers. The structured light patterns are generated dynamically, allowing tracking without physical landmarks. Moreover, the occlusion of the generated landmarks is overcome by determining the position of objects using a determined camera position. The imperceptible structured light may be used in systems without tracking.
The tracking may be used without imperceptible structured light.}, note = {Filed: 1997-12-05}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {patent} } @incollection{Raskar1999a, title = {Multi-Projector Displays Using Camera-Based Registration}, author = {Ramesh Raskar and Michael Brown and Ruigang Yang and Wei-Chao Chen and Greg Welch and Herman Towles and Brent Seales and Henry Fuchs }, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1999a.pdf}, year = {1999}, date = {1999-01-01}, booktitle = {Proceedings of the Conference on Visualization 99: Celebrating Ten Years}, pages = {161-168}, address = {San Francisco, CA, USA (October 24 - 29)}, series = {IEEE Visualization}, abstract = {Conventional projector-based display systems are typically designed around precise and regular configurations of projectors and display surfaces. While this results in rendering simplicity and speed, it also means painstaking construction and ongoing maintenance. In previously published work, we introduced a vision of projector-based displays constructed from a collection of casually-arranged projectors and display surfaces. In this paper, we present flexible yet practical methods for realizing this vision, enabling low-cost mega-pixel display systems with large physical dimensions, higher resolution, or both. The techniques afford new opportunities to build personal 3D visualization systems in offices, conference rooms, theaters, or even your living room. As a demonstration of the simplicity and effectiveness of the methods that we continue to perfect, we show in the included video that a 10-year-old child can construct and calibrate a two-camera, two-projector, head-tracked display system, all in about 15 minutes.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @inproceedings{Raskar1999aa, title = {Table-Top Spatially-Augmented Reality: Bringing Physical Models to Life with Projected Imagery}, author = {Ramesh Raskar and Greg Welch and Wei-Chao Chen}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1999aa.pdf}, isbn = {0-7695-0359-4}, year = {1999}, date = {1999-01-01}, booktitle = {IWAR '99: Proceedings of the 2nd IEEE and ACM International Workshop on Augmented Reality}, pages = {64}, publisher = {IEEE Computer Society}, address = {Washington, DC, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @incollection{Raskar1999b, title = {Table-Top Spatially-Augmented Reality: Bringing Physical Models to Life with Projected Imagery}, author = {Ramesh Raskar and Greg Welch and Wei-Chao Chen}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1999b.pdf}, year = {1999}, date = {1999-01-01}, booktitle = {Second International Workshop on Augmented Reality (IWAR'99)}, pages = {64-71}, address = {San Francisco, CA, USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @inproceedings{Seales1999, title = {Real-Time Depth Warping for 3-D Scene Reconstruction}, author = {Brent Seales and Greg Welch and Chris Jaynes}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Seales1999.pdf}, year = {1999}, date = {1999-01-01}, booktitle = {1999 IEEE Aerospace Conference}, address = {Snowmass at Aspen, CO USA}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @unpublished{Welch1999aa, title = {A Self-Contained Wide-Area Tracker using Sensor Fusion}, author = {Greg Welch and Matthew Cutts and Gary Bishop}, year = {1999}, date = {1999-01-01}, note = {Submission to SIGGRAPH
1999 (ID papers_1566)}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {unpublished} } @misc{Bishop1998ab, title = {Grids Progress Meeting (Slides)}, author = {Gary Bishop and Greg Welch}, year = {1998}, date = {1998-10-01}, howpublished = {Presented at DARPA status meeting}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {misc} } @misc{Welch1998aa, title = {HiBall+Inertial Source Code}, author = {Greg Welch and Gary Bishop}, year = {1998}, date = {1998-06-01}, howpublished = {Internal to UNC-CH Computer Science}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {misc} } @inproceedings{Azuma1998, title = {Making Augmented Reality Work Outdoors Requires Hybrid Tracking}, author = {Ronald T. Azuma and Bruce R. Hoff and Howard E. Neely, III and Ronald Sarfaty and Michael J. Daily and Gary Bishop and Vernon Chi and Greg Welch and Ulrich Neumann and Suya You and Rich Nichols and Jim Cannon}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Azuma1998.pdf}, year = {1998}, date = {1998-01-01}, booktitle = {First International Workshop on Augmented Reality}, pages = {219-224}, address = {San Francisco, CA, USA}, abstract = {Developing Augmented Reality systems that work outdoors, rather than indoors in constrained environments, will open new application areas and motivate the construction of new, more general tracking approaches. Accurate tracking outdoors is difficult because we have little control over the environment and fewer resources available compared to an indoor application. This position paper examines the individual tracking technologies available and concludes that for the near term, a hybrid solution is the only viable approach. The distortion measured from an electronic compass and tilt sensor is discussed.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @incollection{Raskar1998a, title = {The Office of the Future: A Unified Approach to Image-Based Modeling and Spatially Immersive Displays}, author = {Ramesh Raskar and Gregory Welch and Matthew Cutts and Adam Lake and Lev Stesin and Henry Fuchs}, editor = {Michael F. Cohen}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1998a.pdf}, doi = {10.1145/280814.280861}, year = {1998}, date = {1998-01-01}, booktitle = {Computer Graphics}, pages = {179-188}, publisher = {ACM Press, Addison-Wesley}, address = {Orlando, FL, USA (July 19 - 24)}, edition = {SIGGRAPH Conference Proceedings}, series = {Annual Conference on Computer Graphics & Interactive Techniques}, abstract = {We introduce ideas, proposed technologies, and initial results for an office of the future that is based on a unified application of computer vision and computer graphics in a system that combines and builds upon the notions of the CAVE, tiled display systems, and image-based modeling. The basic idea is to use real-time computer vision techniques to dynamically extract per-pixel depth and reflectance information for the visible surfaces in the office including walls, furniture, objects, and people, and then to either project images on the surfaces, render images of the surfaces, or interpret changes in the surfaces. In the first case, one could designate every-day (potentially irregular) real surfaces in the office to be used as spatially immersive display surfaces, and then project high-resolution graphics and text onto those surfaces. In the second case, one could transmit the dynamic image-based models over a network for display at a remote site.
Finally, one could interpret dynamic changes in the surfaces for the purposes of tracking, interaction, or augmented reality applications. To accomplish the simultaneous capture and display we envision an office of the future where the ceiling lights are replaced by computer controlled cameras and smart projectors that are used to capture dynamic image-based models with imperceptible structured light techniques, and to display high-resolution images on designated display surfaces. By doing both simultaneously on the designated display surfaces, one can dynamically adjust or autocalibrate for geometric, intensity, and resolution variations resulting from irregular or changing display surfaces, or overlapped projector images. Our current approach to dynamic image-based modeling is to use an optimized structured light scheme that can capture per-pixel depth and reflectance at interactive rates. Our system implementation is not yet imperceptible, but we can demonstrate the approach in the laboratory. Our approach to rendering on the designated (potentially irregular) display surfaces is to employ a two-pass projective texture scheme to generate images that when projected onto the surfaces appear correct to a moving head-tracked observer. We present here an initial implementation of the overall vision, in an office-like setting, and preliminary demonstrations of our dynamic modeling and display techniques.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @inproceedings{Raskar1998aa, title = {Spatially Augmented Reality}, author = {Ramesh Raskar and Greg Welch and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1998d-IWAR_SAR.pdf}, year = {1998}, date = {1998-01-01}, booktitle = {First IEEE Workshop on Augmented Reality (IWAR '98)}, pages = {11--20}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @inproceedings{Raskar1998b, title = {Seamless Projection Overlaps Using Warping and Intensity Blending}, author = {Ramesh Raskar and Greg Welch and Henry Fuchs}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1998b.pdf}, year = {1998}, date = {1998-01-01}, booktitle = {Fourth International Conference on Virtual Systems and Multimedia (VSMM)}, address = {Gifu, Japan}, abstract = {High-resolution Spatially Immersive Displays (SID) generally involve wide field of view (WFOV) image generation using multiple projectors. This paper describes a robust calibration and rendering method for projector based seamless displays using a video camera. It solves the basic problem of registering and blending overlap of two projections at a time. It is applicable even when the displays are not flat walls or projection axes are not orthogonal to the displays.
Projectors' intrinsic or extrinsic parameters are not required.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {inproceedings} } @incollection{Raskar1998c, title = {Efficient Image Generation for Multiprojector and Multisurface Displays}, author = {Ramesh Raskar and Matthew Cutts and Greg Welch and Wolfgang Stürzlinger}, editor = {George Drettakis and Nelson Max}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1998c.pdf}, year = {1998}, date = {1998-01-01}, booktitle = {Proceedings of the Eurographics Workshop in Vienna, Austria}, pages = {139-144}, publisher = {Springer Verlag}, address = {Vienna, Austria (June 29 - July 1)}, edition = {Rendering Techniques 98}, note = {ISBN 3-211-83213-0}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @incollection{Raskar1998d, title = {Spatially Augmented Reality}, author = {Ramesh Raskar and Greg Welch and Henry Fuchs}, editor = {Reinhold Behringer and Gudrun Klinker and David Mizell}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1998d-IWAR_SAR.pdf}, year = {1998}, date = {1998-01-01}, booktitle = {Augmented Reality: Placing Artificial Objects in Real Scenes (Proceedings of the First IEEE Workshop on Augmented Reality, IWAR'98)}, pages = {63-72}, publisher = {A.K. Peters Ltd.}, address = {San Francisco, CA, USA (November 1, 1998)}, note = {ISBN 1-56881-098-9. Long Lasting Impact Paper Award.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @techreport{Raskar1998e, title = {3D Talking Heads: Image Based Modeling at Interactive Rates using Structured Light Projection}, author = {Ramesh Raskar and Henry Fuchs and Gregory Welch and Adam Lake and Matthew Cutts}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Raskar1998e.pdf}, year = {1998}, date = {1998-01-01}, number = {TR98-017}, institution = {University of North Carolina at Chapel Hill, Department of Computer Science}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {techreport} } @incollection{Welch1997, title = {SCAAT: Incremental Tracking with Incomplete Information}, author = {Greg Welch and Gary Bishop}, editor = {Turner Whitted}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1997.pdf}, year = {1997}, date = {1997-01-01}, booktitle = {Computer Graphics}, pages = {333--344}, publisher = {ACM Press, Addison-Wesley}, address = {Los Angeles, CA, USA (August 3--8)}, edition = {SIGGRAPH 97 Conference Proceedings}, series = {Annual Conference on Computer Graphics & Interactive Techniques}, abstract = {We present a promising new mathematical method for tracking a user's pose (position and orientation) for interactive computer graphics. The method, which is applicable to a wide variety of both commercial and experimental systems, improves accuracy by properly assimilating sequential observations, filtering sensor measurements, and by concurrently autocalibrating source and sensor devices. It facilitates user motion prediction, multisensor data fusion, and higher report rates with lower latency than previous methods. Tracking systems determine the user's pose by measuring signals from low-level hardware sensors. For reasons of physics and economics, most systems make multiple sequential measurements which are then combined to produce a single tracker report.
For example, commercial magnetic trackers using the SPASYN (Space Synchro) system sequentially measure three magnetic vectors and then combine them mathematically to produce a report of the sensor pose. Our new approach produces tracker reports as each new low-level sensor measurement is made rather than waiting to form a complete collection of observations. Because single observations under-constrain the mathematical solution, we refer to our approach as single-constraint-at-a-time or SCAAT tracking. The key is that the single observations provide some information about the user's state, and thus can be used to incrementally improve a previous estimate. We recursively apply this principle, incorporating new sensor data as soon as it is measured. With this approach we are able to generate estimates more frequently, with less latency, and with improved accuracy. We present results from both an actual implementation, and from extensive simulations.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {incollection} } @phdthesis{Welch1996, title = {SCAAT: Incremental Tracking with Incomplete Information}, author = {Gregory Francis Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1996.pdf}, year = {1996}, date = {1996-01-01}, address = {Chapel Hill, NC}, school = {University of North Carolina at Chapel Hill}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {phdthesis} } @techreport{Welch1996b, title = {One‐Step‐at‐a‐Time Tracking}, author = {Greg Welch and Gary Bishop}, year = {1996}, date = {1996-01-01}, number = {TR96‐021}, institution = {University of North Carolina at Chapel Hill, Department of Computer Science}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {techreport} } @techreport{Welch1995aa, title = {Hybrid Self-Tracker: An Inertial/Optical Hybrid Three-Dimensional Tracking System}, author = {Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1995aa.pdf}, year = {1995}, date = {1995-01-01}, number = {TR95-048}, institution = {University of North Carolina at Chapel Hill, Department of Computer Science}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {techreport} } @article{Welch1995ab, title = {A Survey of Power Management Techniques in Mobile Computing Operating Systems}, author = {Greg Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1995ab.pdf}, doi = {10.1145/219282.219293}, year = {1995}, date = {1995-01-01}, journal = {ACM Operating Systems Review (SIGOPS-OSR)}, volume = {29}, number = {4}, pages = {47-56}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {article} } @techreport{Welch1995b, title = {An Introduction to the Kalman filter}, author = {Greg Welch and Gary Bishop}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/kalman_intro.pdf}, year = {1995}, date = {1995-01-01}, number = {TR95-041}, institution = {University of North Carolina at Chapel Hill, Department of Computer Science}, note = {The article has also been translated into Chinese by Xuchen Yao, a student at The Institute of Acoustics of The Chinese Academy of Sciences. See also our Kalman filter web site at https://www.cs.unc.edu/~welch/kalman/index.html.}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {techreport} } @unpublished{Welch1989aa, title = {Versicom: Versatile Communications Software}, author = {Gregory F.
Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1989aa.pdf}, year = {1989}, date = {1989-07-01}, note = {NASA Jet Propulsion Laboratory}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {unpublished} } @unpublished{Welch1986aa, title = {The Easy Chair: A Microprocessor-Controlled Wheelchair for Children With Muscular Disorders}, author = {Gregory F. Welch and James P. Williams}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1986_EasyChair-red.pdf}, year = {1986}, date = {1986-05-01}, note = {Purdue University, E.E.T. 490/491 Senior Design Project, Final Report}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {unpublished} } @unpublished{Welch1986ab, title = {The Infrared Touch-Pad}, author = {Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1986ab.pdf}, year = {1986}, date = {1986-02-01}, note = {Purdue University, E.E.T. 421 Report}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {unpublished} } @unpublished{Welch1985aa, title = {The Easy Chair: A Microprocessor-Controlled Wheelchair for Children With Muscular Disorders}, author = {Gregory F. Welch and James P. Williams}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Welch1985_EasyChair.pdf}, year = {1985}, date = {1985-12-01}, note = {Purdue University, E.E.T. 490/491 Senior Design Project, Preliminary Report}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {unpublished} } @unpublished{Williams1985aa, title = {The Pressure Sensitive Touch-Pad}, author = {James P. Williams and Gregory F. Welch}, url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/Williams1985aa.pdf}, year = {1985}, date = {1985-04-01}, note = {Purdue University, E.E.T. 454 Project Report}, keywords = {A-gfw}, pubstate = {published}, tppubtype = {unpublished} } @inproceedings{Choudhary2023, title = {Exploring the Social Influence of Virtual Humans Unintentionally Conveying Conflicting Emotions}, author = {Zubin Choudhary and Nahal Norouzi and Austin Erickson and Ryan Schubert and Gerd Bruder and Gregory F Welch}, booktitle = {Proceedings of the 30th IEEE Conference on Virtual Reality and 3D User Interfaces (IEEE VR)}, year = {2023}, keywords = {}, pubstate = {published}, tppubtype = {inproceedings} }