2010
 | David Donszik; Bastian Lengert; Gerd Bruder; Klaus H. Hinrichs; Frank Steinicke 3D-Manipulationstechnik f Proceedings Article In: Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR), pp. 83–92, 2010. @inproceedings{DLBHS10,
title = {3D-Manipulationstechnik f},
author = { David Donszik and Bastian Lengert and Gerd Bruder and Klaus H. Hinrichs and Frank Steinicke},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/DLBHS10.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)},
pages = {83--92},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
![[POSTER] Estimation of Virtual Interpupillary Distances for Immersive Head-Mounted Displays](https://sreal.ucf.edu/wp-content/uploads/2017/02/BSH10.png) | Gerd Bruder; Frank Steinicke; Klaus H. Hinrichs [POSTER] Estimation of Virtual Interpupillary Distances for Immersive Head-Mounted Displays Proceedings Article In: Proceedings of the ACM Symposium on Applied Perception in Graphics and Visualization (APGV) (Poster Presentation), pp. 168, 2010. @inproceedings{BSH10,
title = {[POSTER] Estimation of Virtual Interpupillary Distances for Immersive Head-Mounted Displays},
author = {Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSH10.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception in Graphics and Visualization (APGV) (Poster Presentation)},
pages = {168},
abstract = {Head-mounted displays (HMDs) allow users to observe virtual environments (VEs) from an egocentric perspective. In order to present a realistic stereoscopic view, the rendering system has to be adjusted to the characteristics of the HMD, e.g., the display's field of view (FOV), as well as to characteristics that are unique for each user, in particular her interpupillary distance (IPD). Typically, the user's IPD is measured and then applied to the virtual IPD used for rendering, assuming that the HMD's display units are correctly adjusted in front of the user's eyes. A discrepancy between the user's IPD and the virtual IPD may distort the perception of the VE. In this poster we analyze the user's perception of a VE in an HMD environment when it is displayed stereoscopically with different IPDs. We conducted an experiment to identify the virtual IPDs that subjects judge as natural for different FOVs. In our experiment, subjects had to adjust the IPD for a rendered virtual replica of our real laboratory until their perception of the virtual replica matched their perception of the real laboratory. We found that the virtual IPDs subjects estimate as most natural are often not identical to their measured IPDs, and that the estimates were affected by the FOV of the HMD and the virtual FOV used for rendering.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
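For context, a minimal sketch (our own illustration, not the paper's code) of where a virtual IPD enters stereo rendering: each eye's view matrix is the head-centered view matrix offset laterally by half the IPD. The helper name and the plain lateral offset are assumptions; real HMD pipelines also adapt the per-eye projection frusta.

```python
# Sketch: deriving per-eye view matrices from a virtual IPD.
# Assumption (not from the paper): column-major 4x4 view matrices
# and a simple lateral offset of +/- ipd/2 along the head's x axis.
import numpy as np

def eye_view_matrices(head_view: np.ndarray, virtual_ipd: float):
    """Offset a head-centered view matrix by half the IPD per eye."""
    half = virtual_ipd / 2.0
    left, right = np.eye(4), np.eye(4)
    left[0, 3] = +half   # left eye sits at x = -ipd/2 in head coords
    right[0, 3] = -half  # right eye sits at x = +ipd/2
    return left @ head_view, right @ head_view

# Example: a measured IPD of 6.4 cm; the study suggests the virtual
# IPD judged "most natural" may deviate from this measured value
# depending on the display FOV and the geometric FOV.
L, R = eye_view_matrices(np.eye(4), virtual_ipd=0.064)
```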
![[POSTER] Immersive Virtual Studio for Architectural Exploration](https://sreal.ucf.edu/wp-content/uploads/2017/02/BSVH10a.jpg) | Gerd Bruder; Frank Steinicke; Dimitar Valkov; Klaus H. Hinrichs [POSTER] Immersive Virtual Studio for Architectural Exploration Proceedings Article In: Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI) (Poster Presentation), pp. 125–126, 2010. @inproceedings{BSVH10a,
title = {[POSTER] Immersive Virtual Studio for Architectural Exploration},
author = {Gerd Bruder and Frank Steinicke and Dimitar Valkov and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSVH10a-optimized.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI) (Poster Presentation)},
pages = {125--126},
abstract = {Architects use a variety of analog and digital tools and media to plan and design buildings. Immersive virtual reality (VR) technologies have shown great potential for architectural design, especially for exploration and review of design proposals. In this work we propose a virtual studio system, which allows architects and clients to use arbitrary real-world tools such as maps or rulers during immersive exploration of virtual 3D models. The user interface allows architects and clients to review designs and compose 3D architectural scenes, combining benefits of mixed-reality environments with immersive head-mounted display setups.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
![[POSTER] A Virtual Reality Handball Goalkeeper Analysis System](https://sreal.ucf.edu/wp-content/uploads/2017/02/BZBSHFS10.jpg) | Benjamin Bolte; Florian Zeidler; Gerd Bruder; Frank Steinicke; Klaus H. Hinrichs; Lennart Fischer; Jörg Schorer [POSTER] A Virtual Reality Handball Goalkeeper Analysis System Proceedings Article In: Proceedings of the Joint Virtual Reality Conference (JVRC) (Poster Presentation), pp. 1–2, 2010. @inproceedings{BZBSHFS10,
title = {[POSTER] A Virtual Reality Handball Goalkeeper Analysis System},
author = {Benjamin Bolte and Florian Zeidler and Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs and Lennart Fischer and Jörg Schorer},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BZBSHFS10.pdf},
year = {2010},
date = {2010-01-01},
booktitle = {Proceedings of the Joint Virtual Reality Conference (JVRC) (Poster Presentation)},
pages = {1--2},
abstract = {Understanding how professional handball goalkeepers acquire skills to combine decision-making and complex motor tasks is a multidisciplinary challenge. In order to improve a goalkeeper's training by allowing insights into their complex perception, learning and action processes, virtual reality (VR) technologies provide a way to standardize experimental sport situations. In this poster we describe a VR-based handball system, which supports the evaluation of perceptual-motor skills of handball goalkeepers during shots. In order to allow reliable analyses it is essential that goalkeepers can move naturally, as they would in a real game situation, which is often inhibited by the wires or markers that are usually used in VR systems. To address this challenge, we developed a camera-based goalkeeper analysis system that detects and measures goalkeeper motions in real time.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
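In the spirit of the markerless tracking described above, a hedged sketch of camera-based motion detection; OpenCV background subtraction is a stand-in chosen by us, not necessarily the paper's actual pipeline.

```python
# Sketch: markerless detection of a moving goalkeeper in camera
# images via background subtraction (a stand-in technique; the
# paper's own method is not specified in this abstract).
import cv2
import numpy as np

cap = cv2.VideoCapture(0)  # camera observing the goal area
subtractor = cv2.createBackgroundSubtractorMOG2(detectShadows=False)
kernel = np.ones((3, 3), np.uint8)

while True:
    ok, frame = cap.read()
    if not ok:
        break
    mask = subtractor.apply(frame)                   # moving pixels
    mask = cv2.morphologyEx(mask, cv2.MORPH_OPEN, kernel)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL,
                                   cv2.CHAIN_APPROX_SIMPLE)
    if contours:
        # Largest moving blob approximates the goalkeeper per frame.
        x, y, w, h = cv2.boundingRect(max(contours, key=cv2.contourArea))
```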
2009
 | Frank Steinicke; Gerd Bruder; Klaus H. Hinrichs; Markus Lappe; Brian Ries; Victoria Interrante Transitional Environments Enhance Distance Perception in Immersive Virtual Reality Systems Proceedings Article In: Proceedings of the ACM Symposium on Applied Perception in Graphics and Visualization (APGV), pp. 19–26, ACM Press, 2009. @inproceedings{SBHLRI09,
title = {Transitional Environments Enhance Distance Perception in Immersive Virtual Reality Systems},
author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Markus Lappe and Brian Ries and Victoria Interrante},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHLRI09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the ACM Symposium on Applied Perception in Graphics and Visualization (APGV)},
pages = {19--26},
publisher = {ACM Press},
abstract = {Several experiments have provided evidence that ego-centric distances are perceived as compressed in immersive virtual environments relative to the real world. The principal factors responsible for this phenomenon have remained largely unknown. However, recent experiments suggest that when the virtual environment (VE) is an exact replica of a user's real physical surroundings, the person's distance perception improves. Furthermore, it has been shown that when users start their virtual reality (VR) experience in such a virtual replica and then gradually transition to a different VE, their sense of presence in the actual virtual world increases significantly. In this case the virtual replica serves as a transitional environment between the real and virtual world. In this paper we examine whether a person's distance estimation skills can be transferred from a transitional environment to a different VE. We have conducted blind walking experiments to analyze if starting the VR experience in a transitional environment can improve a person's ability to estimate distances in an immersive VR system. We found that users significantly improve their distance estimation skills when they enter the virtual world via a transitional environment.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
 | Gerd Bruder; Frank Steinicke; Klaus H. Hinrichs; Markus Lappe Reorientation during Body Turns Proceedings Article In: Proceedings of the Joint Virtual Reality Conference (JVRC), pp. 145–152, 2009. @inproceedings{BSHL09,
title = {Reorientation during Body Turns},
author = { Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs and Markus Lappe},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSHL09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the Joint Virtual Reality Conference (JVRC)},
pages = {145--152},
abstract = {Immersive virtual environment (IVE) systems allow users to control their virtual viewpoint by moving their tracked head and by walking through the real world, but usually the virtual space which can be explored by walking is restricted to the size of the tracked space of the laboratory. However, as the user approaches an edge of the tracked walking area, reorientation techniques can be applied to imperceptibly turn the user by manipulating the mapping between real-world body turns and virtual camera rotations. With such reorientation techniques, users can walk through large-scale IVEs while physically remaining in a reasonably small workspace. In psychophysical experiments we have quantified how much users can unknowingly be reoriented during body turns. We tested 18 subjects in two different experiments. First, in a just-noticeable difference test subjects had to perform two successive body turns between which they had to discriminate. In the second experiment subjects performed body turns that were mapped to different virtual camera rotations. Subjects had to estimate whether the visually perceived rotation was slower or faster than the physical rotation. Our results show that the detection thresholds for reorientation as well as the point of subjective equality between real movement and visual stimuli depend on the virtual rotation angle.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
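The manipulation quantified in this study can be stated in a few lines. A minimal sketch (our formulation; `g_r` and the per-frame update follow the usual rotation-gain definition, not code from the paper):

```python
# Sketch: a rotation gain g_r maps tracked physical head turns to
# virtual camera rotation; within detection thresholds, g_r != 1
# reorients the user without being noticed.
def apply_rotation_gain(virtual_yaw: float, physical_delta_yaw: float,
                        g_r: float) -> float:
    """Accumulate a scaled physical yaw change (degrees) into the
    virtual yaw; called once per tracking frame."""
    return virtual_yaw + g_r * physical_delta_yaw

# Example: with g_r = 1.1, a real 90-degree body turn yields a
# 99-degree virtual turn, i.e., 9 degrees of covert reorientation.
yaw = 0.0
for _ in range(90):              # ninety 1-degree tracker updates
    yaw = apply_rotation_gain(yaw, 1.0, g_r=1.1)
assert round(yaw) == 99
```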
 | Frank Steinicke; Gerd Bruder; Klaus H. Hinrichs; Jason Jerald; Harald Frenz; Markus Lappe Real Walking through Virtual Environments by Redirection Techniques Journal Article In: Journal of Virtual Reality and Broadcasting (JVRB), vol. 6, no. 2, pp. 1–16, 2009. @article{SBHJFL09,
title = {Real Walking through Virtual Environments by Redirection Techniques},
author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Jason Jerald and Harald Frenz and Markus Lappe},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHJFL09.pdf},
year = {2009},
date = {2009-01-01},
journal = {Journal of Virtual Reality and Broadcasting (JVRB)},
volume = {6},
number = {2},
pages = {1--16},
abstract = {We present redirection techniques that support exploration of large-scale virtual environments (VEs) by means of real walking. We quantify to what degree users can unknowingly be redirected in order to guide them through VEs in which virtual paths differ from the physical paths. We further introduce the concept of dynamic passive haptics, by which any number of virtual objects can be mapped to real physical proxy props having similar haptic properties (i.e., size, shape, and surface structure), such that the user can sense these virtual objects by touching their real-world counterparts. Dynamic passive haptics provides the user with the illusion of interacting with a desired virtual object by redirecting her to the corresponding proxy prop. We describe the concepts of generic redirected walking and dynamic passive haptics and present experiments in which we have evaluated these concepts. Furthermore, we discuss implications that have been derived from a user study, and we present approaches that derive physical paths which may vary from the virtual counterparts.},
keywords = {},
pubstate = {published},
tppubtype = {article}
}
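The two core manipulations behind these redirection techniques fit in a short sketch (our illustration; names and the flat 2D model are assumptions): a translation gain scales mapped walking distance, and a curvature gain injects scene rotation per meter walked, so the user unconsciously compensates and physically walks an arc while the virtual path stays straight.

```python
# Sketch: translation gain g_t and curvature gain g_c = 1/r.
import math

def map_step(virtual_x: float, step_len: float, g_t: float) -> float:
    """Translation gain: a physical step of step_len meters moves
    the virtual viewpoint g_t * step_len meters forward."""
    return virtual_x + g_t * step_len

def injected_rotation(step_len: float, g_c: float) -> float:
    """Curvature gain: scene rotation (radians) blended into the
    camera while the user walks step_len meters."""
    return g_c * step_len

# Example: g_c = 1/24 (cf. the 24 m arc radius in the VRST 2008
# entry below) injects about 2.4 degrees of rotation per meter.
print(math.degrees(injected_rotation(1.0, 1 / 24)))   # ~2.39
```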
 | Frank Steinicke; Gerd Bruder; Klaus H. Hinrichs; Anthony Steed Presence-Enhancing Real Walking User Interface for First-Person Video Games Proceedings Article In: Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Game Papers, ACM Press, 2009, (acceptance rate 25%). @inproceedings{SBHS09,
title = {Presence-Enhancing Real Walking User Interface for First-Person Video Games},
author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Anthony Steed},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBHS09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH), Game Papers},
publisher = {ACM Press},
abstract = {For most first-person video games it is important that players have a high level of feeling present in the displayed game environment. Virtual reality (VR) technologies have enormous potential to enhance gameplay, since players can experience the game immersively from the perspective of the player's virtual character. However, the VR technology itself, such as tracking devices and cabling, has until recently restricted the ability of users to really walk over long distances. In this paper we introduce a VR-based user interface for presence-enhancing gameplay with which players can explore the game environment in the most natural way, i.e., by real walking. While the player walks through the virtual game environment, we guide him/her on a physical path which is different from the virtual path and fits into the VR laboratory space. In order to further increase the VR experience, we introduce the concept of transitional environments. Such a transitional environment is a virtual replica of the laboratory environment, where the VR experience starts and which enables a gradual transition to the game environment. We have quantified how much humans can unknowingly be redirected and whether or not a gradual transition to a first-person game via a transitional environment increases the user's sense of presence.},
note = {(acceptance rate 25%)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
 | Frank Steinicke; Gerd Bruder; Scott Kuhl; Pete Willemsen; Markus Lappe; Klaus H. Hinrichs Judgment of Natural Perspective Projections in Head-Mounted Display Environments Proceedings Article In: Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST), pp. 35–42, ACM Press, 2009. @inproceedings{SBKWLH09,
title = {Judgment of Natural Perspective Projections in Head-Mounted Display Environments},
author = { Frank Steinicke and Gerd Bruder and Scott Kuhl and Pete Willemsen and Markus Lappe and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBKWLH09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)},
pages = {35--42},
publisher = {ACM Press},
abstract = {The display units integrated in today's head-mounted displays (HMDs) provide only a limited field of view (FOV) of the virtual world. In order to present an undistorted view of the virtual environment (VE), the perspective projection used to render the VE has to be adjusted to the limitations caused by the HMD characteristics. In particular, the geometric field of view (GFOV), which defines the virtual aperture angle used for rendering of the 3D scene, is set up according to the display's field of view. A discrepancy between these two fields of view distorts the geometry of the VE in such a way that objects and distances appear to be "warped". Although discrepancies between the geometric and the HMD's field of view affect a person's perception of space, the resulting mini- and magnification of the displayed scene can be useful in some applications and may improve specific aspects of immersive virtual environments, for example, distance judgment, presence, and visual search task performance. In this paper we analyze whether a user is consciously aware of perspective distortions of the VE displayed in the HMD. We introduce a psychophysical calibration method to determine the HMD's actual field of view, which may vary from the nominal values specified by the manufacturer. Furthermore, we conducted an experiment to identify perspective projections for HMDs which are perceived as natural by subjects, even if these perspectives deviate from the perspectives that are inherently defined by the display's field of view. We found that subjects evaluate a field of view as natural when it is larger than the actual field of view of the HMD, in some cases by up to 50%.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
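The mini-/magnification mentioned in the abstract follows directly from the two fields of view. A small worked sketch (our arithmetic, using the standard pinhole relation; not code from the paper):

```python
# Sketch: image scale caused by rendering with a geometric FOV
# (GFOV) that deviates from the HMD's physical display FOV.
import math

def magnification(display_fov_deg: float, geometric_fov_deg: float) -> float:
    """Scale factor tan(DFOV/2) / tan(GFOV/2): < 1 means the scene
    is minified, > 1 magnified."""
    return (math.tan(math.radians(display_fov_deg) / 2)
            / math.tan(math.radians(geometric_fov_deg) / 2))

# Example: rendering a 60-degree display with a 75-degree GFOV
# minifies the scene to about 75% of its correct angular size,
# yet such enlarged GFOVs were often judged as natural.
print(round(magnification(60, 75), 2))   # 0.75
```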
 | Gerd Bruder; Frank Steinicke; Kai Rothaus; Klaus H. Hinrichs Enhancing Presence in Head-mounted Display Environments by Visual Body Feedback using Head-mounted Cameras Proceedings Article In: Proceedings of the International Conference on CyberWorlds, pp. 43–50, IEEE Press, 2009. @inproceedings{BSRH09,
title = {Enhancing Presence in Head-mounted Display Environments by Visual Body Feedback using Head-mounted Cameras},
author = { Gerd Bruder and Frank Steinicke and Kai Rothaus and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSRH09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the International Conference on CyberWorlds},
pages = {43--50},
publisher = {IEEE Press},
abstract = {A fully-articulated visual representation of a user in an immersive virtual environment (IVE) can enhance the user's subjective sense of feeling present in the virtual world. Usually this means that a user has to wear a full-body motion capture suit to track real-world body motions and map them to a virtual body model. In this paper we present an augmented virtuality approach that makes it possible to incorporate a realistic view of oneself in virtual environments using cameras attached to head-mounted displays. The described system can easily be integrated into typical virtual reality setups. Egocentric camera images captured by a video-see-through system are segmented in real-time into foreground, showing parts of the user's body, e.g., her hands or feet, and background. The segmented foreground is then displayed as an inset in the user's current view of the virtual world. Thus the user is able to see her physical body in an arbitrary virtual world, including individual characteristics such as skin pigmentation, hairiness, etc.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
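The compositing step reads naturally as a few lines of image code. A hedged sketch (our stand-in using a green-screen chroma-key threshold; the paper's actual segmentation may differ):

```python
# Sketch: overlay the segmented body from a head-mounted camera
# image onto the rendered virtual environment.
import cv2
import numpy as np

def composite_body(camera_bgr: np.ndarray, rendered_bgr: np.ndarray):
    """Keep camera pixels (hands, feet) where the chroma-key
    background is absent; show the rendered VE elsewhere."""
    hsv = cv2.cvtColor(camera_bgr, cv2.COLOR_BGR2HSV)
    background = cv2.inRange(hsv, (35, 60, 60), (85, 255, 255))  # green
    out = rendered_bgr.copy()
    body = background == 0
    out[body] = camera_bgr[body]
    return out
```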
 | Frank Steinicke; Gerd Bruder; Anthony Steed; Klaus H. Hinrichs; Alexander Gerlach Does a Gradual Transition to the Virtual World increase Presence? Proceedings Article In: Proceedings of IEEE Virtual Reality (VR), pp. 203–210, IEEE Press, 2009. @inproceedings{SBSHG09,
title = {Does a Gradual Transition to the Virtual World increase Presence?},
author = { Frank Steinicke and Gerd Bruder and Anthony Steed and Klaus H. Hinrichs and Alexander Gerlach},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBSHG09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of IEEE Virtual Reality (VR)},
pages = {203--210},
publisher = {IEEE Press},
abstract = {In order to increase a user's sense of presence in an artificial environment some researchers propose a gradual transition from reality to the virtual world instead of immersing users into the virtual world directly. One approach is to start the VR experience in a virtual replica of the physical space to accustom users to the characteristics of VR, e.g., latency, reduced field of view or tracking errors, in a known environment. Although this procedure is already applied in VR demonstrations, until now it has not been verified whether the usage of such a transitional environment - as transition between real and virtual environment - increases someone's sense of presence. We have observed subjective, physiological and behavioral reactions of subjects during a fully-immersive flight phobia experiment under two different conditions: the virtual flight environment was displayed immediately, or subjects visited a transitional environment before entering the virtual flight environment. We have quantified to what extent a gradual transition to the VE via a transitional environment increases the level of presence. We have found that subjective responses show significantly higher scores for the user's sense of presence, and that subjects' behavioral reactions change when a transitional environment is shown first. Considering physiological reactions, no significant difference could be found.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
 | Annika Busch; Marius Staggenborg; Tobias Brix; Gerd Bruder; Frank Steinicke; Klaus H. Hinrichs Darstellung physikalischer Objekte in Immersiven Head-Mounted Display Umgebungen Proceedings Article In: Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR), pp. 233–244, Shaker Verlag, 2009. @inproceedings{BSBBSH09,
title = {Darstellung physikalischer Objekte in Immersiven Head-Mounted Display Umgebungen},
author = { Annika Busch and Marius Staggenborg and Tobias Brix and Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSBBSH09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the GI Workshop on Virtual and Augmented Reality (GI VR/AR)},
pages = {233--244},
publisher = {Shaker Verlag},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
 | Gerd Bruder; Frank Steinicke; Klaus H. Hinrichs Arch-Explore: A Natural User Interface for Immersive Architectural Walkthroughs Proceedings Article In: Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI), pp. 75–82, IEEE Press, 2009. @inproceedings{BSH09,
title = {Arch-Explore: A Natural User Interface for Immersive Architectural Walkthroughs},
author = { Gerd Bruder and Frank Steinicke and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/BSH09.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI)},
pages = {75--82},
publisher = {IEEE Press},
abstract = {In this paper we propose the Arch-Explore user interface, which supports natural exploration of architectural 3D models at different scales in a real walking virtual reality (VR) environment such as head-mounted display (HMD) or CAVE setups. We discuss in detail how user movements can be transferred to the virtual world to enable walking through virtual indoor environments. To overcome the limited interaction space in small VR laboratory setups, we have implemented redirected walking techniques to support natural exploration of comparably large-scale virtual models. Furthermore, the concept of virtual portals provides a means to cover long distances intuitively within architectural models. We describe the software and hardware setup and discuss benefits of Arch-Explore.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
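The virtual-portal mechanic for covering long distances can be summarized as one pose rebase. A minimal sketch (our formulation with 4x4 homogeneous matrices; the actual Arch-Explore implementation may add details such as a 180-degree flip between facing portals):

```python
# Sketch: re-express the tracked user pose relative to the exit
# portal when the user steps through the entry portal.
import numpy as np

def through_portal(user_pose: np.ndarray, entry: np.ndarray,
                   exit_: np.ndarray) -> np.ndarray:
    """All arguments are 4x4 world-space poses; the user's pose
    relative to the entry portal is replayed at the exit portal."""
    return exit_ @ np.linalg.inv(entry) @ user_pose
```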
 | Frank Steinicke; Gerd Bruder; Kai Rothaus; Klaus H. Hinrichs POSTER: Visual Identity from Egocentric Camera Images for Head-Mounted Display Environments Proceedings Article In: Proceedings of the Virtual Reality International Conference (VRIC) (Poster Proceedings), pp. 289–290, 2009. @inproceedings{SBRH09,
title = {POSTER: Visual Identity from Egocentric Camera Images for Head-Mounted Display Environments},
author = { Frank Steinicke and Gerd Bruder and Kai Rothaus and Klaus H. Hinrichs},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the Virtual Reality International Conference (VRIC) (Poster Proceedings)},
pages = {289--290},
abstract = {A number of researchers have reported that a fully-articulated visual representation of oneself in an immersive virtual environment (IVE) has considerable impact on social interaction and the subjective sense of presence in the virtual world. Therefore, many approaches address this challenge and incorporate a virtual model of the user's body in the VE. Usually, a fully-articulated visual identity, or so-called "virtual body", is manipulated according to user motions, which are defined by feature points detected by a tracking system. Therefore, markers have to be attached to certain feature points, as done, for instance, with full-body motion suits which have to be worn by the user. Such instrumentation is unsuitable in scenarios which involve multiple persons simultaneously or in which participants frequently change. Furthermore, individual characteristics such as skin pigmentation, hairiness or clothes are not considered by this procedure, where the tracked data is always mapped to the same invariant 3D model. In this paper we present a software-based approach that makes it possible to incorporate a realistic visual identity of oneself in the VE, and which can be integrated easily into existing hardware setups. In our setup we focus on the visual representation of a user's arms and hands. The idea is to make use of images captured by cameras that are attached to video-see-through head-mounted displays. These egocentric frames can be segmented into foreground, showing parts of the human body, i.e., the human's hands, and background. Then the extremities can be overlaid on the user's current view of the virtual world, and thus a high-fidelity virtual body can be visualized.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
![[POSTER] A Virtual Body for Augmented Virtuality by Chroma-Keying of Egocentric Videos](https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRH09a_a.jpg) | Frank Steinicke; Gerd Bruder; Kai Rothaus; Klaus H. Hinrichs [POSTER] A Virtual Body for Augmented Virtuality by Chroma-Keying of Egocentric Videos Proceedings Article In: Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI) (Poster Presentation), pp. 125–126, IEEE Press, 2009. @inproceedings{SBRH09a,
title = {[POSTER] A Virtual Body for Augmented Virtuality by Chroma-Keying of Egocentric Videos},
author = { Frank Steinicke and Gerd Bruder and Kai Rothaus and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRH09a.pdf},
year = {2009},
date = {2009-01-01},
booktitle = {Proceedings of the IEEE Symposium on 3D User Interfaces (3DUI) (Poster Presentation)},
pages = {125--126},
publisher = {IEEE Press},
abstract = {A fully-articulated visual representation of oneself in an immersive virtual environment has considerable impact on the subjective sense of presence in the virtual world. Therefore, many approaches address this challenge and incorporate a virtual model of the user's body in the VE. Such a virtual body (VB) is manipulated according to user motions, which are defined by feature points detected by a tracking system. The required tracking devices are unsuitable in scenarios which involve multiple persons simultaneously or in which participants frequently change. Furthermore, individual characteristics such as skin pigmentation, hairiness or clothes are not considered by this procedure. In this paper we present a software-based approach that makes it possible to incorporate a realistic visual representation of oneself in the VE. The idea is to make use of images captured by cameras that are attached to video-see-through head-mounted displays. These egocentric frames can be segmented into foreground, showing parts of the human body, and background. Then the extremities can be overlaid on the user's current view of the virtual world, and thus a high-fidelity virtual body can be visualized.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
2008
 | Frank Steinicke; Gerd Bruder; Timo Ropinski; Klaus H. Hinrichs The Holodeck Construction Manual Proceedings Article In: Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH) (Conference DVD), 2008. @inproceedings{SBRH08,
title = {The Holodeck Construction Manual},
author = { Frank Steinicke and Gerd Bruder and Timo Ropinski and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRH08.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Proceedings of the ACM International Conference and Exhibition on Computer Graphics and Interactive Techniques (SIGGRAPH) (Conference DVD)},
abstract = {Immersive virtual reality (IVR) systems allow users to interact in virtual environments (VEs), but in these systems, e.g., six-wall CAVEs with outside-in optical tracking, presence is limited to the virtual world and the physical surroundings cannot be perceived. Real walking is the most intuitive way of moving through such a setup as well as through our real world. Unfortunately, typical IVEs have only limited interaction space in contrast to the potentially infinite VE. In recent years an enormous effort has been undertaken in order to allow omnidirectional walking of arbitrary distances in VEs. Appropriate hardware-based approaches are very costly [Bouguila] and thus will probably not get beyond a prototype stage in the near future. We propose an alternative approach, which is motivated by the theory of perception. We exploit the fact that the human visual sense may vary from the proprioceptive and vestibular senses without humans noticing a difference. Thus it becomes possible to direct the user on a physical path which may vary from the path perceived in the IVE. To exploit this limitation of the human sensory system we have extended redirected walking by introducing motion compression and gains, which scale the real distance a user walks, rotation compression and gains, which make the real turns smaller or larger, and curvature gains, which bend the user's walking direction such that s/he walks on a curve. Furthermore, we propose the concept of dynamic passive haptics, which extends passive haptics in such a way that any number of virtual objects can be sensed by means of real proxy objects having similar haptic capabilities. Thus, dynamic passive haptics provide the user with the illusion of interacting with a desired virtual object by touching a corresponding proxy object. By exploiting these proposed concepts, finally the virtual holodeck construction manual can be written. Such an IVE provides sufficient space to let users walk arbitrarily and sense any object in the VE by touching an associated proxy object.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
 | Frank Steinicke; Gerd Bruder; Luv Kohli; Jason Jerald; Klaus H. Hinrichs Taxonomy and Implementation of Redirection Techniques for Ubiquitous Passive Haptic Feedback Proceedings Article In: Proceedings of the International Conference on Cyberworlds (CW), pp. 217–223, IEEE Press, 2008, (acceptance rate 39%). @inproceedings{SBKJH08,
title = {Taxonomy and Implementation of Redirection Techniques for Ubiquitous Passive Haptic Feedback},
author = { Frank Steinicke and Gerd Bruder and Luv Kohli and Jason Jerald and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBKJH08.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Proceedings of the International Conference on Cyberworlds (CW)},
pages = {217--223},
publisher = {IEEE Press},
abstract = {Traveling through immersive virtual environments (IVEs) by means of real walking is an important activity to increase naturalness of VR-based interaction. However, the size of the virtual world often exceeds the size of the tracked space so that a straightforward implementation of omni-directional and unlimited walking is not possible. Redirected walking is one concept to solve this problem of walking in VEs by inconspicuously guiding the user on a physical path that may differ from the path the user visually perceives. When the user approaches a virtual object she can be redirected to a real proxy object that is registered to the virtual counterpart and provides passive haptic feedback. In such passive haptic environments, any number of virtual objects can be mapped to proxy objects having similar haptic properties, e.g., size, shape and texture. The user can sense a virtual object by touching its real world counterpart. Redirecting a user to a registered proxy object makes it necessary to predict the user's target location in the VE. Based on this prediction we determine a path through the physical space such that the user is guided to the registered proxy object. We present a taxonomy of possible redirection techniques that enable user guidance such that inconsistencies between visual and proprioceptive stimuli are imperceptible. We describe how a user's target in the virtual world can be predicted reliably and how a corresponding real-world path to the registered proxy object can be derived.},
note = {(acceptance rate 39%)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
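The prediction step described in this abstract can be approximated very simply. A sketch (our deliberately simple stand-in; the paper's predictor is more elaborate):

```python
# Sketch: predict the user's virtual target as the candidate whose
# bearing deviates least from the current walking direction.
import math

def predict_target(pos, heading, targets):
    """pos and targets are (x, y) tuples, heading in radians;
    returns the most likely target object position."""
    def deviation(t):
        bearing = math.atan2(t[1] - pos[1], t[0] - pos[0])
        return abs((bearing - heading + math.pi) % (2 * math.pi) - math.pi)
    return min(targets, key=deviation)

# The registered proxy object of the predicted target then defines
# the physical goal toward which the redirected path is planned.
target = predict_target((0, 0), 0.0, [(10, 1), (3, -4), (-2, 8)])
```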
 | Frank Steinicke; Gerd Bruder; Timo Ropinski; Klaus H. Hinrichs Moving Towards Generally Applicable Redirected Walking Proceedings Article In: Proceedings of the Virtual Reality International Conference (VRIC), pp. 15–24, IEEE Press, 2008. @inproceedings{SBRH08a,
title = {Moving Towards Generally Applicable Redirected Walking},
author = { Frank Steinicke and Gerd Bruder and Timo Ropinski and Klaus H. Hinrichs},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBRH08a.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Proceedings of the Virtual Reality International Conference (VRIC)},
pages = {15--24},
publisher = {IEEE Press},
abstract = {Walking is the most natural way of moving within a virtual environment (VE). Mapping the user's movement one-to-one to the real world clearly has the drawback that the limited range of the tracking sensors and a rather small working space in the real world restrict the user's interaction. In this paper we introduce concepts for virtual locomotion interfaces that support exploration of large-scale virtual environments by redirected walking. Based on the results of a user study we have quantified to what degree users can unknowingly be redirected in order to guide them through an arbitrarily sized VE in which virtual paths differ from the paths tracked in the real working space. We describe the concepts of generic redirected walking in detail and present implications that have been derived from the initially conducted user study. Furthermore we discuss example applications from different domains in order to point out the benefits of our approach.},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
 | Frank Steinicke; Gerd Bruder; Jason Jerald; Harald Frenz; Markus Lappe Analyses of Human Sensitivity to Redirected Walking Proceedings Article In: Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST), pp. 149–156, 2008, (acceptance rate 17%). @inproceedings{SBJFL08,
title = {Analyses of Human Sensitivity to Redirected Walking},
author = { Frank Steinicke and Gerd Bruder and Jason Jerald and Harald Frenz and Markus Lappe},
url = {https://sreal.ucf.edu/wp-content/uploads/2017/02/SBJFL08.pdf},
year = {2008},
date = {2008-01-01},
booktitle = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology (VRST)},
pages = {149--156},
abstract = {Redirected walking allows users to walk through large-scale immersive virtual environments (IVEs) while physically remaining in a reasonably small workspace by intentionally injecting scene motion into the IVE. In a constant stimuli experiment with a two-alternative forced-choice task we have quantified how much humans can unknowingly be redirected onto virtual paths which are different from the paths they actually walk. 18 subjects have been tested in four different experiments: (E1a) discrimination between virtual and physical rotation, (E1b) discrimination between two successive rotations, (E2) discrimination between virtual and physical translation, and discrimination of walking direction (E3a) without and (E3b) with start-up. In experiment E1a subjects performed rotations to which different gains had been applied, and then had to choose whether or not the visually perceived rotation was greater than the physical rotation. In experiment E1b subjects discriminated between two successive rotations where different gains had been applied to the physical rotation. In experiment E2 subjects chose whether they thought that the physical walk was longer than the visually perceived scaled travel distance. In experiments E3a and E3b subjects walked a straight path in the IVE which was physically bent to the left or to the right, and they estimated the direction of the curvature; in E3a the gain was applied immediately, whereas in E3b it was applied after a start-up of two meters. Our results show that users can be turned physically about 68% more or 10% less than the perceived virtual rotation, distances can be up- or down-scaled by 22%, and users can be redirected on a circular arc with a radius greater than 24 meters while they believe they are walking straight.},
note = {(acceptance rate 17%)},
keywords = {},
pubstate = {published},
tppubtype = {inproceedings}
}
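One way to restate the reported thresholds as gains (our arithmetic, with each gain defined as the ratio of virtual to physical motion):

```latex
% Rotation: physical turns 68% more or 10% less than the virtual turn
g_R \in \left[\tfrac{1}{1.68},\ \tfrac{1}{0.90}\right] \approx [0.60,\ 1.11]
% Translation: walked distances up- or down-scaled by 22%
g_T \in [0.78,\ 1.22]
% Curvature: imperceptible up to an arc radius of r = 24 m, i.e.
g_C = \tfrac{1}{r} \le \tfrac{1}{24\,\mathrm{m}} \approx 0.042\,\mathrm{m}^{-1}
```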
 | Frank Steinicke; Gerd Bruder; Klaus H. Hinrichs; Timo Ropinski; Mario Lopes Advances in Human-Computer Interaction: 3D User Interfaces for Collaborative Works Book Chapter In: pp. 279–294, In-Tech, 2008. @inbook{SBHRL08,
title = {Advances in Human-Computer Interaction: 3D User Interfaces for Collaborative Works},
author = { Frank Steinicke and Gerd Bruder and Klaus H. Hinrichs and Timo Ropinski and Mario Lopes},
year = {2008},
date = {2008-01-01},
pages = {279--294},
publisher = {In-Tech},
abstract = {Desktop environments have proven to be a powerful user interface and have been used as the de facto standard human-computer interaction paradigm for over 20 years. However, there is a rising demand for 3D applications dealing with complex datasets, which exceeds the possibilities provided by traditional devices or two-dimensional displays. For these domains more immersive and intuitive interfaces are required. But in order to get the users' acceptance, technology-driven solutions that require inconvenient instrumentation, e.g., stereo glasses or tracked gloves, should be avoided. Autostereoscopic display environments equipped with tracking systems enable users to experience 3D virtual environments more naturally without annoying devices, for instance via gestures. However, currently these approaches are only applied in specially designed or adapted applications without universal usability. Although these systems provide enough space to support multiple users, additional costs and inconvenient instrumentation hinder acceptance of these user interfaces. In this chapter we introduce new collaborative 3D user interface concepts for such setups, where minimal instrumentation of the user is required so that the strategies can be easily integrated into everyday working environments. Therefore, we propose an interaction system and framework which allows displaying and interacting with both monoscopic and stereoscopic content in parallel. Furthermore, the setup enables multiple users to view the same data simultaneously. The challenges for combined mouse-, keyboard- and gesture-based input paradigms in such an environment are pointed out and novel interaction strategies are introduced.},
keywords = {},
pubstate = {published},
tppubtype = {inbook}
}