Publications
This page is automatically generated from the White Rose database using name-string queries. It has known inaccuracies; please contact the authors directly to confirm the data.
Many scientific and engineering problems are modelled by simulating scalar fields defined either on space-filling meshes (Eulerian) or as particles (Lagrangian). For analysis and visualization, topological primitives such as contour trees can be used, but these often need simplification to filter out small-scale features. For parcel-based convective cloud simulations, simplification of the contour tree requires a volumetric measure rather than persistence. Unlike for cubic meshes, volume cannot be approximated by counting regular vertices. Typically, this is addressed by resampling irregular data onto a uniform grid. Unfortunately, the spatial proximity of parcels requires a high sampling frequency, resulting in a massive increase in data size for processing. We therefore extend volume-based contour tree simplification to parcel-in-cell simulations with a graph adaptor in Viskores (formerly called VTK-m), using Delaunay tetrahedralization of the parcel centroids as input. Instead of relying on a volume approximation by counting regular vertices, as was done for cubic meshes, we adapt the 2D area splines reported by Bajaj et al. [1] and Zhou et al. [37]. We implement this in Viskores as prefix-sum-style hypersweeps for parallel efficiency and show how the approach can be generalized to compute any integrable property. Finally, our results reveal that computing contour trees directly on the parcels is orders of magnitude faster than computing them on a resampled grid, while arguably also offering better-quality segmentation by avoiding interpolation artifacts.
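As a concrete illustration of the hypersweep pattern mentioned in the abstract, the following minimal sequential Python sketch sums a per-node integrated quantity (such as a spline-based volume share) from the leaves of a contour tree towards the root. The tree encoding and all names are illustrative assumptions; the Viskores implementation performs this step as parallel prefix-sum-style hypersweeps.

from collections import defaultdict

# Sequential sketch of a hypersweep-style accumulation: total[n] ends up
# holding the integrated property (e.g. volume) of the subtree rooted at n.
# `parent` maps each contour tree node to its parent (the root maps to None);
# `local_value[n]` is the property integrated over node n's own region.
def sweep_up(parent, local_value):
    children = defaultdict(list)
    root = None
    for node, par in parent.items():
        if par is None:
            root = node
        else:
            children[par].append(node)
    total = dict(local_value)
    stack, order = [root], []          # iterative post-order traversal
    while stack:
        node = stack.pop()
        order.append(node)
        stack.extend(children[node])
    for node in reversed(order):       # children are settled before parents
        for child in children[node]:
            total[node] += total[child]
    return total

# Toy tree: two leaves under one root; the root accumulates 1.0 + 2.5 + 0.5.
print(sweep_up({"root": None, "a": "root", "b": "root"},
               {"root": 1.0, "a": 2.5, "b": 0.5}))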
@misc{wrro230600,
title = {Lagrangian Simulation Volume-Based Contour Tree Simplification},
year = {2025},
booktitle = {IEEE VIS Workshop TopoInVis},
publisher = {IEEE},
note = {In Press
This is an author produced version of a proceedings paper accepted for publication in 2025 IEEE Topological Data Analysis and Visualization (TopoInVis) made available under the terms of the Creative Commons Attribution License (CC-BY), which permits unrestricted use, distribution and reproduction in any medium, provided the original work is properly cited.},
journal = {2025 IEEE Topological Data Analysis and Visualization (TopoInVis)},
month = {August},
abstract = {Many scientific and engineering problems are modelled by simulating scalar fields defined either on space-filling meshes (Eulerian) or as particles (Lagrangian). For analysis and visualization, topological primitives such as contour trees can be used, but these often need simplification to filter out small-scale features. For parcel-based convective cloud simulations, simplification of the contour tree requires a volumetric measure rather than persistence. Unlike for cubic meshes, volume cannot be approximated by counting regular vertices. Typically, this is addressed by resampling irregular data onto a uniform grid. Unfortunately, the spatial proximity of parcels requires a high sampling frequency, resulting in a massive increase in data size for processing. We therefore extend volume-based contour tree simplification to parcel-in-cell simulations with a graph adaptor in Viskores (formerly called VTK-m), using Delaunay tetrahedralization of the parcel centroids as input. Instead of relying on a volume approximation by counting regular vertices, as was done for cubic meshes, we adapt the 2D area splines reported by Bajaj et al. [1] and Zhou et al. [37]. We implement this in Viskores as prefix-sum-style hypersweeps for parallel efficiency and show how the approach can be generalized to compute any integrable property. Finally, our results reveal that computing contour trees directly on the parcels is orders of magnitude faster than computing them on a resampled grid, while arguably also offering better-quality segmentation by avoiding interpolation artifacts.},
author = {Dilys, D. and Carr, H. A. and Boeing, S. J.},
url = {https://eprints.whiterose.ac.uk/id/eprint/230600/}
}
Dashboards have arguably been the most used visualizations during the COVID-19 pandemic. They were used to communicate its evolution to national governments for disaster mitigation, to the public domain to inform about its status, and to epidemiologists to comprehend and predict the evolution of the disease. Each design had to be tailored for different tasks and to varying audiences - in many cases set up in a very short time due to the urgent need. In this paper, we collect notable examples of dashboards and reflect on their use and design during the pandemic from a user-oriented perspective: we interview a group of researchers with varying visualization expertise who actively used dashboards during the pandemic as part of their daily workflow. We discuss our findings and compile a list of lessons learned to support future visualization researchers and dashboard designers.
@article{wrro225692,
number = {2},
note = {This is an author produced version of an article published in IEEE Computer Graphics and Applications, made available under the terms of the Creative Commons Attribution License (CC-BY), which permits unrestricted use, distribution and reproduction in any medium, provided the original work is properly cited.},
doi = {10.1109/MCG.2025.3538257},
year = {2025},
volume = {45},
journal = {IEEE Computer Graphics and Applications},
month = {June},
pages = {135--142},
publisher = {IEEE},
title = {Reflections on the Use of Dashboards in the COVID-19 Pandemic},
issn = {0272-1716},
keywords = {COVID-19, Visualization, Pandemics, Prevention and mitigation, Government, Public healthcare, Interviews, Fake news, Diseases},
abstract = {Dashboards have arguably been the most used visualizations during the COVID-19 pandemic. They were used to communicate its evolution to national governments for disaster mitigation, to the public domain to inform about its status, and to epidemiologists to comprehend and predict the evolution of the disease. Each design had to be tailored for different tasks and to varying audiences - in many cases set up in a very short time due to the urgent need. In this paper, we collect notable examples of dashboards and reflect on their use and design during the pandemic from a user-oriented perspective: we interview a group of researchers with varying visualization expertise who actively used dashboards during the pandemic as part of their daily workflow. We discuss our findings and compile a list of lessons learned to support future visualization researchers and dashboard designers.},
author = {Arleo, A. and Borgo, R. and Kohlhammer, J. and Ruddle, R. A. and Scharlach, H. and Yuan, X.},
url = {https://ieeexplore.ieee.org/document/11033413}
}
Foveated rendering techniques have seen recent development with the advent of commercial head-mounted displays with eye-tracking capabilities. The main drive is to exploit particular features of our peripheral vision to optimize rendering pipelines, using less computational effort where the human visual system may be unaware of differences. Most efforts have focused on simplifying spatial visual detail in areas away from the user's gaze by adjusting the acuity of shading models, the sharpness of images, and pixel density. However, other areas of the perception pipeline are also influential, particularly in certain purpose-specific applications. In this paper, we demonstrate that it is possible to reduce animation rates in crowd simulations, up to a complete stop for agents in our peripheral vision, without users noticing the effect. We implemented a prototype Unity3D application with typical crowd simulation scenarios and carried out user experiments to study subjects' perception of changes in animation rates. We find that in the best case we were able to reduce the number of operations by 99.3% compared to an unfoveated scenario, with opportunities for further gains in combination with other acceleration techniques. This paper also includes an in-depth discussion of human perception of movement in peripheral vision, with novel ideas that will have applications beyond crowd simulation.
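The core mechanism lends itself to a small sketch: choose each agent's animation update rate from its angular distance to the gaze point, dropping to zero in the far periphery. The thresholds and taper below are placeholder assumptions for illustration, not the values studied in the paper.

import math

# Illustrative eccentricity-to-rate mapping for one agent, given unit
# view-space direction vectors for the gaze and the agent.
def update_rate_hz(gaze_dir, agent_dir, full_rate=60.0):
    dot = sum(g * a for g, a in zip(gaze_dir, agent_dir))
    ecc_deg = math.degrees(math.acos(max(-1.0, min(1.0, dot))))
    if ecc_deg < 10.0:                            # near the gaze: full rate
        return full_rate
    if ecc_deg < 40.0:                            # mid periphery: taper down
        return full_rate * (40.0 - ecc_deg) / 30.0
    return 0.0                                    # far periphery: freeze

# An agent roughly 14 degrees away from the gaze direction.
print(update_rate_hz((0.0, 0.0, 1.0), (0.24, 0.0, 0.97)))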
@article{wrro233099,
publisher = {Association for Computing Machinery (ACM)},
title = {Foveated Animations for Efficient Crowd Simulation},
journal = {Proceedings of the ACM on Computer Graphics and Interactive Techniques},
month = {May},
volume = {8},
year = {2025},
doi = {10.1145/3728306},
note = {{\copyright} 2025 Copyright held by the owner/author(s).
This work is licensed under a Creative Commons Attribution 4.0 International License.},
number = {1},
abstract = {Foveated rendering techniques have seen recent development with the advent of commercial head-mounted displays with eye-tracking capabilities. The main drive is to exploit particular features of our peripheral vision to optimize rendering pipelines, using less computational effort where the human visual system may be unaware of differences. Most efforts have focused on simplifying spatial visual detail in areas away from the user's gaze by adjusting the acuity of shading models, the sharpness of images, and pixel density. However, other areas of the perception pipeline are also influential, particularly in certain purpose-specific applications. In this paper, we demonstrate that it is possible to reduce animation rates in crowd simulations, up to a complete stop for agents in our peripheral vision, without users noticing the effect. We implemented a prototype Unity3D application with typical crowd simulation scenarios and carried out user experiments to study subjects' perception of changes in animation rates. We find that in the best case we were able to reduce the number of operations by 99.3\% compared to an unfoveated scenario, with opportunities for further gains in combination with other acceleration techniques. This paper also includes an in-depth discussion of human perception of movement in peripheral vision, with novel ideas that will have applications beyond crowd simulation.},
keywords = {Perception, Animation, Foveated techniques, Virtual Reality},
issn = {2577-6193},
url = {https://dl.acm.org/doi/10.1145/3728306},
author = {Stancu, F.-V. and Weiss, T. and dos Anjos, R. K.}
}
We present the first combinatorial algorithm for efficiently computing the Reeb space in all dimensions. The Reeb space is a higher-dimensional generalization of the Reeb graph, which is standard practice in the analysis of scalar fields, along with other computational topology tools such as persistent homology and the Morse-Smale complex. One significant limitation of topological tools for scalar fields is that data often involves multiple variables, where joint analysis is more insightful. Generalizing topological data structures to multivariate data has proven challenging, and the Reeb space is one of the few available options. However, none of the existing algorithms can efficiently compute the Reeb space in arbitrary dimensions, and there are no available implementations which are robust with respect to numerical errors. We propose a new algorithm, called arrange and traverse, for computing the Reeb space of a generic piecewise linear map over a simplicial mesh of any dimension. We implement a robust specialization of our algorithm for tetrahedral meshes and evaluate it on real-life data.
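For reference, the object being computed has a standard definition (not specific to this paper). Given a generic map f from a space X to R^k, the Reeb space contracts every connected component of every fiber (the preimage of a single value) to a point; for k = 1 this recovers the familiar Reeb graph. In LaTeX notation:

\[
  x \sim y \;\iff\; f(x) = f(y) \ \text{and}\ x, y \text{ lie in the same
  connected component of } f^{-1}(f(x)), \qquad \mathcal{R}(f) = X / {\sim}.
\]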
@article{wrro229610,
title = {Arrange and Traverse Algorithm for Computation of Reeb Spaces of Piecewise Linear Maps},
year = {2025},
publisher = {Wiley},
note = {In Press
This is an author produced version of an article accepted for publication in Computer Graphics Forum, made available under the terms of the Creative Commons Attribution License (CC-BY), which permits unrestricted use, distribution and reproduction in any medium, provided the original work is properly cited.},
journal = {Computer Graphics Forum},
month = {May},
issn = {0167-7055},
abstract = {We present the first combinatorial algorithm for efficiently computing the Reeb space in all dimensions. The Reeb space is a higher-dimensional generalization of the Reeb graph, which is standard practice in the analysis of scalar fields, along with other computational topology tools such as persistent homology and the Morse-Smale complex. One significant limitation of topological tools for scalar fields is that data often involves multiple variables, where joint analysis is more insightful. Generalizing topological data structures to multivariate data has proven challenging, and the Reeb space is one of the few available options. However, none of the existing algorithms can efficiently compute the Reeb space in arbitrary dimensions, and there are no available implementations which are robust with respect to numerical errors. We propose a new algorithm, called arrange and traverse, for computing the Reeb space of a generic piecewise linear map over a simplicial mesh of any dimension. We implement a robust specialization of our algorithm for tetrahedral meshes and evaluate it on real-life data.},
author = {Hristov, P. and Sakurai, D. and Carr, H. and Hotz, I. and Bin Masood, T.},
url = {https://eprints.whiterose.ac.uk/id/eprint/229610/}
}
Multiple choice questions (MCQs) are widely used to assess students. Motivated by issues with accuracy and reliability that were found during university exams, we conducted a controlled user experiment with 53 participants and a commercial MCQ system that used an AI engine for grading. Each participant filled in three paper answer sheets to a prescribed pattern, one with a black pen, and the others with heavy and light pencil shading. The pattern contained 100 questions (an equal number with one, two, three, four and five correct answers). The sheets were digitized using two scanners, with each set of scans graded separately and producing a similar pattern of results. In the pen condition, the AI engine did not make any grading errors and was uncertain for 0.8% of answers (these then needed to be graded by hand). However, the AI engine made grading errors for 0.25% of the heavy pencil answers and 4.9% of the light pencil answers, and was uncertain for many more answers. The results show that AI grading was only reliable when participants used a pen, which raises concerns about the guidance some organizations provide for students to use a pencil. From an explainable AI perspective, conducting rigorous user evaluations would improve transparency about AI products for end-user stakeholders, help AI developers understand the limitations of their models, and identify checks and balances that should be incorporated.
@misc{wrro229013,
booktitle = {6th International Conference on Artificial Intelligence in Education Technology},
publisher = {Springer Nature},
year = {2025},
series = {Lecture Notes on Data Engineering and Communications Technologies},
title = {An evaluation of AI-based grading of multiple choice assessments},
month = {April},
journal = {TBC},
note = {In Press},
keywords = {Multiple choice assessment, User evaluation, Explainable AI},
issn = {2367-4512},
abstract = {Multiple choice questions (MCQs) are widely used to assess students. Motivated by issues with accuracy and reliability that were found during university exams, we conducted a controlled user experiment with 53 participants and a commercial MCQ system that used an AI engine for grading. Each participant filled in three paper answer sheets to a prescribed pattern, one with a black pen, and the others with heavy and light pencil shading. The pattern contained 100 questions (an equal number with one, two, three, four and five correct answers). The sheets were digitized using two scanners, with each set of scans graded separately and producing a similar pattern of results. In the pen condition, the AI engine did not make any grading errors and was uncertain for 0.8\% of answers (these then needed to be graded by hand). However, the AI engine made grading errors for 0.25\% of the heavy pencil answers and 4.9\% of the light pencil answers, and was uncertain for many more answers. The results show that AI grading was only reliable when participants used a pen, which raises concerns about the guidance some organizations provide for students to use a pencil. From an explainable AI perspective, conducting rigorous user evaluations would improve transparency about AI products for end-user stakeholders, help AI developers understand the limitations of their models, and identify checks and balances that should be incorporated.},
author = {Ruddle, R. A. and Naqvi, S.},
url = {https://eprints.whiterose.ac.uk/id/eprint/229013/}
}
Real camera footage is subject to noise, motion blur (MB) and depth of field (DoF). In some applications these might be considered distortions to be removed, but in others it is important to model them, because simply removing them would be ineffective or interfere with an aesthetic choice. In augmented reality applications where virtual content is composed into a live video feed, we can model noise, MB and DoF to make the virtual content visually consistent with the video. Existing methods for this typically suffer two main limitations. First, they require a camera calibration step to relate a known calibration target to the specific camera's response. Second, existing work requires methods that can be (differentiably) tuned to the calibration, such as slow and specialized neural networks. We propose a method which estimates parameters for noise, MB and DoF instantly, allowing off-the-shelf real-time simulation methods, e.g., from a game engine, to be used in compositing augmented content. Our main idea is to unlock both features by showing how to use modern computer vision methods that can remove noise, MB and DoF from the video stream, essentially providing self-calibration. This allows us to auto-tune any black-box real-time noise+MB+DoF method to deliver fast and high-fidelity augmentation consistency.
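The self-calibration idea can be sketched compactly: restore a clean frame with off-the-shelf vision methods, then tune the parameters of any black-box distortion simulator so that re-simulating the clean frame reproduces the observed one. The brute-force grid search below is a stand-in assumption for the paper's instant estimator, and restore/simulate are assumed callables, not the paper's models.

import itertools
import numpy as np

def calibrate(observed, restore, simulate, param_grid):
    clean = restore(observed)      # strip noise/MB/DoF: self-calibration input
    best, best_err = None, np.inf
    for values in itertools.product(*param_grid.values()):
        params = dict(zip(param_grid.keys(), values))
        # Score how well these parameters re-create the observed distortions.
        err = float(np.mean((simulate(clean, **params) - observed) ** 2))
        if err < best_err:
            best, best_err = params, err
    return best                    # reuse when compositing virtual content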
@article{wrro224303,
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {March},
note = {Published online
{\copyright} 2025 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
year = {2025},
doi = {10.1109/tvcg.2025.3549541},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
title = {Blind Augmentation: Calibration-free Camera Distortion Model Estimation for Real-time Mixed-reality Consistency},
url = {https://eprints.whiterose.ac.uk/id/eprint/224303/},
author = {Prakash, S. and Walton, D. R. and Anjos, R. K. D. and Steed, A. and Ritschel, T.},
abstract = {Real camera footage is subject to noise, motion blur (MB) and depth of field (DoF). In some applications these might be considered distortions to be removed, but in others it is important to model them, because simply removing them would be ineffective or interfere with an aesthetic choice. In augmented reality applications where virtual content is composed into a live video feed, we can model noise, MB and DoF to make the virtual content visually consistent with the video. Existing methods for this typically suffer two main limitations. First, they require a camera calibration step to relate a known calibration target to the specific camera's response. Second, existing work requires methods that can be (differentiably) tuned to the calibration, such as slow and specialized neural networks. We propose a method which estimates parameters for noise, MB and DoF instantly, allowing off-the-shelf real-time simulation methods, e.g., from a game engine, to be used in compositing augmented content. Our main idea is to unlock both features by showing how to use modern computer vision methods that can remove noise, MB and DoF from the video stream, essentially providing self-calibration. This allows us to auto-tune any black-box real-time noise+MB+DoF method to deliver fast and high-fidelity augmentation consistency.},
issn = {1077-2626},
keywords = {Augmented Reality, optimization}
}
Feature level sets (FLS) have shown significant potential in the analysis of multi-field data by using traits defined in attribute space to specify features in the domain. In this work, we address key challenges in the practical use of FLS: trait design and feature selection for rendering. To simplify trait design, we propose a Cartesian decomposition of traits into simpler components, making the process more intuitive and computationally efficient. Additionally, we utilize dictionary learning results to automatically suggest point traits. To enhance feature selection, we introduce trait-induced merge trees (TIMTs), a generalization of merge trees for feature level sets, aimed at topologically analyzing tensor fields or general multi-variate data. The leaves in the TIMT represent areas in the input data that are closest to the defined trait, thereby most closely resembling the defined feature. This merge tree provides a hierarchy of features, enabling the querying of the most relevant and persistent features. Our method includes various query techniques for the tree, allowing the highlighting of different aspects. We demonstrate the cross-application capabilities of this approach through five case studies from different domains.
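The first step of the pipeline reduces to a simple computation that is easy to sketch: a point trait in attribute space induces a scalar distance field over the samples, and the merge tree of that field (computed by external topology code, not shown here) supplies the feature hierarchy. The arrays and the trait below are illustrative assumptions.

import numpy as np

rng = np.random.default_rng(1)
attributes = rng.random((1000, 3))   # per-sample values of three fields
trait = np.array([0.2, 0.8, 0.5])    # a point trait in attribute space

# Scalar field whose merge tree is the trait-induced merge tree (TIMT);
# samples nearest the trait sit in its deepest leaves.
distance_field = np.linalg.norm(attributes - trait, axis=1)
print(distance_field.min())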
@article{wrro221814,
title = {Multi-field Visualization: Trait design and trait-induced merge trees},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
doi = {10.1109/tvcg.2025.3525974},
year = {2025},
note = {{\copyright} 2025 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
month = {January},
journal = {IEEE Transactions on Visualization and Computer Graphics},
abstract = {Feature level sets (FLS) have shown significant potential in the analysis of multi-field data by using traits defined in attribute space to specify features in the domain. In this work, we address key challenges in the practical use of FLS: trait design and feature selection for rendering. To simplify trait design, we propose a Cartesian decomposition of traits into simpler components, making the process more intuitive and computationally efficient. Additionally, we utilize dictionary learning results to automatically suggest point traits. To enhance feature selection, we introduce trait-induced merge trees (TIMTs), a generalization of merge trees for feature level sets, aimed at topologically analyzing tensor fields or general multi-variate data. The leaves in the TIMT represent areas in the input data that are closest to the defined trait, thereby most closely resembling the defined feature. This merge tree provides a hierarchy of features, enabling the querying of the most relevant and persistent features. Our method includes various query techniques for the tree, allowing the highlighting of different aspects. We demonstrate the cross-application capabilities of this approach through five case studies from different domains.},
issn = {1077-2626},
keywords = {Trait design, Trait-induced Merge Tree, Dictionary learning, Cartesian decomposition, Application-oriented Visualization design},
url = {https://eprints.whiterose.ac.uk/id/eprint/221814/},
author = {Lei, D. and Jankowai, J. and Hristov, P. and Carr, H. and Denby, L. and Bin Masood, T.}
}
A significant challenge on an exascale computer is that the speed at which we compute results exceeds by many orders of magnitude the speed at which we save these results. Therefore the Exascale Computing Project (ECP) ALPINE project focuses on providing exascale-ready visualization solutions, including in situ processing. In situ visualization and analysis run alongside the simulation, operating on results as they are generated and avoiding the need to save entire simulations to storage for later analysis. The ALPINE project made the post hoc visualization tools ParaView and VisIt exascale ready and developed in situ algorithms and infrastructures. The suite of ALPINE algorithms developed under ECP includes novel approaches to enable automated data analysis and visualization to focus on the most important aspects of the simulation. Many of the algorithms also provide data reduction benefits to meet the I/O challenges at exascale. ALPINE also developed a new lightweight in situ infrastructure, Ascent.
@article{wrro216565,
number = {1},
doi = {10.1177/10943420241286521},
year = {2025},
pages = {32--51},
volume = {39},
journal = {International Journal of High Performance Computing Applications},
month = {January},
title = {The ECP ALPINE project: In Situ and Post Hoc Visualization Infrastructure and Analysis Capabilities for Exascale},
publisher = {SAGE},
issn = {1094-3420},
abstract = {A significant challenge on an exascale computer is that the speed at which we compute results exceeds by many orders of magnitude the speed at which we save these results. Therefore the Exascale Computing Project (ECP) ALPINE project focuses on providing exascale-ready visualization solutions, including in situ processing. In situ visualization and analysis run alongside the simulation, operating on results as they are generated and avoiding the need to save entire simulations to storage for later analysis. The ALPINE project made the post hoc visualization tools ParaView and VisIt exascale ready and developed in situ algorithms and infrastructures. The suite of ALPINE algorithms developed under ECP includes novel approaches to enable automated data analysis and visualization to focus on the most important aspects of the simulation. Many of the algorithms also provide data reduction benefits to meet the I/O challenges at exascale. ALPINE also developed a new lightweight in situ infrastructure, Ascent.},
author = {Ahrens, J. and Arienti, M. and Ayachit, U. and Bennett, J. and Binyahib, R. and Bremer, P.-T. and Brugger, E. and Bujack, R. and Carr, H. and Chen, J. and Childs, H. and Dutta, S. and Essiari, A. and Geveci, B. and Harrison, C. and Hazarika, S. and Hickman Fulp, M. L. and Hristov, P. G. and Huang, X. and Insley, J. and Kawakami, Y. and Keilers, C. and Kress, J. and Larsen, M. and Lipsa, D. and Majumder, M. and Marsaglia, N. and Mateevitsi, V. A. and Pascucci, V. and Patchett, J. and Patel, S. and Petruzza, S. and Pugmire, D. and Rizzi, S. and Rogers, D. and R{\"u}bel, O. and Salinas, J. and Sane, S. and Shudler, S. and Stewart, A. and Tsai, K. and Turton, T. L. and Usher, W. and Wang, Z. and Weber, G. and Wetterer-Nelson, C. and Woodring, J. and Yenpure, A.},
url = {https://eprints.whiterose.ac.uk/id/eprint/216565/}
}
Set-type data occurs in many domains such as life sciences (Lamy & Tsopra, 2019), health (Landolfi et al., 2022) and the retail industry (Adnan & Ruddle, 2018), as well as in generic applications such as analysing structures of missing data (Ruddle, Adnan, & Hall, 2022) and association rule mining (Wang et al., 2020). SetVis is a new matrix-based set visualization software package, implemented in Python and available from PyPI. The documentation is available at setvis.readthedocs.io and contains hands-on Jupyter notebooks guiding users through the package. SetVis uses a memory-efficient design, operates with datasets held in RAM or in a database such as PostgreSQL, and allows user interaction graphically or programmatically. A technical evaluation shows that SetVis uses orders of magnitude less memory than the UpSet (Lex et al., 2014) Python package (Nothman, 2022).
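The set model underlying SetVis can be illustrated without the package: each column containing missing values is a set, and each row's missingness combination is a set intersection. The snippet below is a package-free sketch of that view; the real package's API differs, so consult setvis.readthedocs.io for actual usage.

import pandas as pd

df = pd.DataFrame({"a": [1, None, 3, None],
                   "b": [None, None, 6, 7],
                   "c": [1, 2, 3, 4]})

# One row per distinct missingness pattern (set intersection), with counts.
patterns = df.isna().groupby(list(df.columns)).size()
print(patterns)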
@article{wrro225690,
doi = {10.21105/joss.06925},
year = {2024},
number = {103},
note = {Authors of JOSS papers retain copyright.
This work is licensed under a Creative Commons Attribution 4.0 International License.},
title = {SetVis: Visualizing Large Numbers of Sets and Intersections},
publisher = {Open Journals},
volume = {9},
month = {November},
journal = {The Journal of Open Source Software},
author = {Ruddle, R. A. and Hama, L. and Wochner, P. and Strickson, O. T.},
url = {https://joss.theoj.org/papers/10.21105/joss.06925},
issn = {2475-9066},
abstract = {Set-type data occurs in many domains such as life sciences (Lamy \& Tsopra, 2019), health (Landolfi et al., 2022) and the retail industry (Adnan \& Ruddle, 2018), as well as in generic applications such as analysing structures of missing data (Ruddle, Adnan, \& Hall, 2022) and association rule mining (Wang et al., 2020). SetVis is a new matrix-based set visualization software package, implemented in Python and available from PyPI. The documentation is available at setvis.readthedocs.io and contains hands-on Jupyter notebooks guiding users through the package. SetVis uses a memory-efficient design, operates with datasets held in RAM or in a database such as PostgreSQL, and allows user interaction graphically or programmatically. A technical evaluation shows that SetVis uses orders of magnitude less memory than the UpSet (Lex et al., 2014) Python package (Nothman, 2022).}
}
Contour trees describe the topology of level sets in scalar fields and are widely used in topological data analysis and visualization. A main challenge of utilizing contour trees for large-scale scientific data is their computation at scale using high-performance computing. To address this challenge, recent work has introduced distributed hierarchical contour trees for distributed computation and storage of contour trees. However, effective use of these distributed structures in analysis and visualization requires subsequent computation of geometric properties and branch decomposition to support contour extraction and exploration. In this work, we introduce distributed algorithms for augmentation, hypersweeps, and branch decomposition that enable parallel computation of geometric properties, and support the use of distributed contour trees as query structures for scientific exploration. We evaluate the parallel performance of these algorithms and apply them to identify and extract important contours for scientific visualization.
@article{wrro216133,
title = {Distributed Augmentation, Hypersweeps, and Branch Decomposition of Contour Trees for Scientific Exploration},
publisher = {IEEE},
doi = {10.1109/TVCG.2024.3456322},
year = {2024},
note = {Published online
{\copyright} 2024 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
month = {September},
journal = {IEEE Transactions on Visualization and Computer Graphics},
url = {https://ieeexplore.ieee.org/document/10673997},
author = {Li, M. and Carr, H. and R{\"u}bel, O. and Wang, B. and Weber, G. H.},
abstract = {Contour trees describe the topology of level sets in scalar fields and are widely used in topological data analysis and visualization. A main challenge of utilizing contour trees for large-scale scientific data is their computation at scale using high-performance computing. To address this challenge, recent work has introduced distributed hierarchical contour trees for distributed computation and storage of contour trees. However, effective use of these distributed structures in analysis and visualization requires subsequent computation of geometric properties and branch decomposition to support contour extraction and exploration. In this work, we introduce distributed algorithms for augmentation, hypersweeps, and branch decomposition that enable parallel computation of geometric properties, and support the use of distributed contour trees as query structures for scientific exploration. We evaluate the parallel performance of these algorithms and apply them to identify and extract important contours for scientific visualization.},
issn = {1077-2626}
}
Introduction Structured medication reviews (SMRs), introduced in the United Kingdom (UK) in 2020, aim to enhance shared decision-making in medication optimisation, particularly for patients with multimorbidity and polypharmacy. Despite their potential, there is limited empirical evidence on the implementation of SMRs and the challenges faced in the process. This study is part of a larger DynAIRx (Artificial Intelligence for dynamic prescribing optimisation and care integration in multimorbidity) project which aims to introduce Artificial Intelligence (AI) to SMRs and develop machine learning models and visualisation tools for patients with multimorbidity. Here, we explore how SMRs are currently undertaken and what barriers are experienced by those involved in them. Methods Qualitative focus groups and semi-structured interviews took place between 2022 and 2023. Six focus groups were conducted with doctors, pharmacists and clinical pharmacologists (n = 21), and three patient focus groups with patients with multimorbidity (n = 13). Five semi-structured interviews were held with 2 pharmacists, 1 trainee doctor, 1 policy-maker and 1 psychiatrist. Transcripts were analysed using thematic analysis. Results Two key themes limiting the effectiveness of SMRs in clinical practice were identified: 'Medication Reviews in Practice' and 'Medication-related Challenges'. Participants noted limitations to the efficiency and effectiveness of SMRs in practice, including the scarcity of digital tools for identifying and prioritising patients for SMRs; organisational and patient-related challenges in inviting patients for SMRs and ensuring they attend; the time-intensive nature of SMRs, the need for multiple appointments and shared decision-making; the impact of the healthcare context on SMR delivery; poor communication and data sharing issues between primary and secondary care; difficulties in managing mental health medications and specific challenges associated with anticholinergic medication. Conclusion SMRs are complex, time consuming and medication optimisation may require multiple follow-up appointments to enable a comprehensive review. There is a need for a prescribing support system to identify, prioritise and reduce the time needed to understand the patient journey when dealing with large volumes of disparate clinical information in electronic health records. However, monitoring the effects of medication optimisation changes with a feedback loop can be challenging to establish and maintain using current electronic health record systems.
@article{wrro216980,
note = {{\copyright} 2024 Abuzour et al. This is an open
access article distributed under the terms of the
Creative Commons Attribution License, which
permits unrestricted use, distribution, and
reproduction in any medium, provided the original
author and source are credited.},
number = {8},
doi = {10.1371/journal.pone.0299770},
editor = {K. Bennett},
year = {2024},
volume = {19},
month = {August},
journal = {PLOS ONE},
title = {A qualitative exploration of barriers to efficient and effective structured medication reviews in primary care: Findings from the DynAIRx study},
publisher = {Public Library of Science},
url = {https://journals.plos.org/plosone/article?id=10.1371/journal.pone.0299770},
author = {Abuzour, A. S. and Wilson, S. A. and Woodall, A. A. and Mair, F. S. and Clegg, A. and Shantsila, E. and Gabbay, M. and Abaho, M. and Aslam, A. and Bollegala, D. and Cant, H. and Griffiths, A. and Hama, L. and Leeming, G. and Lo, E. and Maskell, S. and O'Connell, M. and Popoola, O. and Relton, S. and Ruddle, R. A. and Schofield, P. and Sperrin, M. and Van Staa, T. and Buchan, I. and Walker, L. E.},
abstract = {Introduction
Structured medication reviews (SMRs), introduced in the United Kingdom (UK) in 2020, aim to enhance shared decision-making in medication optimisation, particularly for patients with multimorbidity and polypharmacy. Despite their potential, there is limited empirical evidence on the implementation of SMRs and the challenges faced in the process. This study is part of a larger DynAIRx (Artificial Intelligence for dynamic prescribing optimisation and care integration in multimorbidity) project which aims to introduce Artificial Intelligence (AI) to SMRs and develop machine learning models and visualisation tools for patients with multimorbidity. Here, we explore how SMRs are currently undertaken and what barriers are experienced by those involved in them.
Methods
Qualitative focus groups and semi-structured interviews took place between 2022 and 2023. Six focus groups were conducted with doctors, pharmacists and clinical pharmacologists (n = 21), and three patient focus groups with patients with multimorbidity (n = 13). Five semi-structured interviews were held with 2 pharmacists, 1 trainee doctor, 1 policy-maker and 1 psychiatrist. Transcripts were analysed using thematic analysis.
Results
Two key themes limiting the effectiveness of SMRs in clinical practice were identified: 'Medication Reviews in Practice' and 'Medication-related Challenges'. Participants noted limitations to the efficiency and effectiveness of SMRs in practice, including the scarcity of digital tools for identifying and prioritising patients for SMRs; organisational and patient-related challenges in inviting patients for SMRs and ensuring they attend; the time-intensive nature of SMRs, the need for multiple appointments and shared decision-making; the impact of the healthcare context on SMR delivery; poor communication and data sharing issues between primary and secondary care; difficulties in managing mental health medications and specific challenges associated with anticholinergic medication.
Conclusion
SMRs are complex, time consuming and medication optimisation may require multiple follow-up appointments to enable a comprehensive review. There is a need for a prescribing support system to identify, prioritise and reduce the time needed to understand the patient journey when dealing with large volumes of disparate clinical information in electronic health records. However, monitoring the effects of medication optimisation changes with a feedback loop can be challenging to establish and maintain using current electronic health record systems.},
keywords = {Health Services and Systems; Health Sciences; Clinical Research; Health Services; Patient Safety; Networking and Information Technology R\&D (NITRD); Machine Learning and Artificial Intelligence; Organisation and delivery of services; Management and decision making; Individual care needs; Generic health relevance; Good Health and Well Being; Humans; Primary Health Care; Focus Groups; Male; Female; Polypharmacy; Qualitative Research; United Kingdom; Multimorbidity; Artificial Intelligence; Middle Aged; Aged; Adult},
issn = {1932-6203}
}
@article{wrro214495,
publisher = {Optica Publishing Group},
title = {Exploring atmospheric optical turbulence: observations across zenith angles},
journal = {Applied Optics},
month = {June},
volume = {63},
pages = {e48--e53},
year = {2024},
doi = {10.1364/ao.519063},
number = {16},
note = {{\copyright} 2024 Optica Publishing Group. Published by Optica Publishing Group under the terms of the Creative Commons Attribution 4.0 License. Further distribution of this work must maintain attribution to the author(s) and the published article's title, journal citation, and DOI.},
issn = {1559-128X},
keywords = {Atomic, Molecular and Optical Physics; Physical Sciences},
url = {https://opg.optica.org/ao/fulltext.cfm?uri=ao-63-16-E48&id=549401###},
author = {Beesley, L. F. and Osborn, J. and Wilson, R. and Farley, O. J. D. and Griffiths, R. and Love, G. D.}
}
The longitudinal chromatic aberration (LCA) of the eye creates a chromatic blur on the retina that is an important cue for accommodation. Although this mechanism can work optimally in broadband illuminants such as daylight, it is not clear how the system responds to the narrowband illuminants used by many modern displays. Here, we measured pupil and accommodative responses as well as visual acuity under narrowband light-emitting diode (LED) illuminants of different peak wavelengths. Observers were able to accommodate under narrowband light and compensate for the LCA of the eye, with no difference in the variability of the steady-state accommodation response between narrowband and broadband illuminants. Intriguingly, our subjects compensated more fully for LCA at nearer distances. That is, the difference in accommodation to different wavelengths became larger when the object was placed nearer the observer, causing the slope of the accommodation response curve to become shallower for shorter wavelengths and steeper for longer ones. Within the accommodative range of observers, accommodative errors were small and visual acuity normal. Comparing between illuminants when accommodation was accurate, visual acuity was worst for blue narrowband light. This cannot be due to the sparser spacing of S-cones, as our stimuli had equal luminance and thus activated LM-cones roughly equally. It is more likely because ocular LCA changes more rapidly at shorter wavelengths, so the finite spectral bandwidth of LEDs corresponds to a greater dioptric range at shorter wavelengths. This effect disappears for larger accommodative errors, due to the increased depth of focus of the eye.
@article{wrro214496,
doi = {10.1167/jov.24.2.11},
year = {2024},
note = {Copyright 2024 The Authors. This work is licensed under a Creative Commons Attribution 4.0 International License.},
number = {2},
publisher = {Association for Research in Vision and Ophthalmology (ARVO)},
title = {Ocular accommodation and wavelength: The effect of longitudinal chromatic aberration on the stimulus-response curve},
volume = {24},
month = {February},
journal = {Journal of Vision},
issn = {1534-7362},
keywords = {accommodation, longitudinal chromatic aberration, stimulus-response curve, pupil size},
abstract = {The longitudinal chromatic aberration (LCA) of the eye creates a chromatic blur on the retina that is an important cue for accommodation. Although this mechanism can work optimally in broadband illuminants such as daylight, it is not clear how the system responds to the narrowband illuminants used by many modern displays. Here, we measured pupil and accommodative responses as well as visual acuity under narrowband light-emitting diode (LED) illuminants of different peak wavelengths. Observers were able to accommodate under narrowband light and compensate for the LCA of the eye, with no difference in the variability of the steady-state accommodation response between narrowband and broadband illuminants. Intriguingly, our subjects compensated more fully for LCA at nearer distances. That is, the difference in accommodation to different wavelengths became larger when the object was placed nearer the observer, causing the slope of the accommodation response curve to become shallower for shorter wavelengths and steeper for longer ones. Within the accommodative range of observers, accommodative errors were small and visual acuity normal. Comparing between illuminants when accommodation was accurate, visual acuity was worst for blue narrowband light. This cannot be due to the sparser spacing of S-cones, as our stimuli had equal luminance and thus activated LM-cones roughly equally. It is more likely because ocular LCA changes more rapidly at shorter wavelengths, so the finite spectral bandwidth of LEDs corresponds to a greater dioptric range at shorter wavelengths. This effect disappears for larger accommodative errors, due to the increased depth of focus of the eye.},
author = {Fernandez-Alonso, M. and Finch, A. P. and Love, G. D. and Read, J. C. A.},
url = {https://jov.arvojournals.org/article.aspx?articleid=2793409}
}
Objectives Missing data is the most common data quality issue in electronic health records (EHRs). Missing data checks implemented in common analytical software are typically limited to counting the number of missing values in individual fields, but researchers and organisations also need to understand multifield missing data patterns to better inform advanced missing data strategies, for which counts or numerical summaries are poorly suited. This study shows how set-based visualisation enables multifield missing data patterns to be discovered and investigated. Design Development and evaluation of interactive set visualisation techniques to find patterns of missing data and generate actionable insights. The visualisations comprised easily interpretable bar charts for sets, heatmaps for set intersections and histograms for distributions of both sets and intersections. Setting and participants Anonymised admitted patient care health records for National Health Service (NHS) hospitals and independent sector providers in England. The visualisation and data mining software was run over 16 million records and 86 fields in the dataset. Results The dataset contained 960 million missing values. Set visualisation bar charts showed how those values were distributed across the fields, including several fields that, unexpectedly, were not complete. Set intersection heatmaps revealed unexpected gaps in diagnosis, operation and date fields because diagnosis and operation fields were not filled up sequentially and some operations did not have corresponding dates. Information gain ratio and entropy calculations allowed us to identify the origin of each unexpected pattern, in terms of the values of other fields. Conclusions Our findings show how set visualisation reveals important insights about multifield missing data patterns in large EHR datasets. The study revealed both rare and widespread data quality issues that were previously unknown, and allowed a particular part of a specific hospital to be pinpointed as the origin of rare issues that NHS Digital did not know existed.
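The entropy and information gain ratio calculations mentioned in the results can be sketched directly: a field whose values strongly predict a missingness pattern scores a gain ratio near 1, pinpointing the pattern's origin. The toy data below is an invented illustration, not from the study.

import numpy as np
import pandas as pd

def entropy(series):
    p = series.value_counts(normalize=True)
    return float(-(p * np.log2(p)).sum())

def gain_ratio(df, feature, target):
    h = entropy(df[target])
    h_cond = sum(len(g) / len(df) * entropy(g[target])
                 for _, g in df.groupby(feature))
    split = entropy(df[feature])          # penalises many-valued features
    return (h - h_cond) / split if split else 0.0

df = pd.DataFrame({"provider": ["X", "X", "Y", "Y"],
                   "date_missing": [True, True, False, False]})
print(gain_ratio(df, "provider", "date_missing"))  # 1.0: provider explains it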
@article{wrro212281,
publisher = {BMJ},
title = {Using set visualisation to find and explain patterns of missing values: a case study with NHS hospital episode statistics data},
volume = {12},
month = {December},
journal = {BMJ Open},
doi = {10.1136/bmjopen-2022-064887},
year = {2023},
number = {11},
note = {{\copyright} Author(s) (or their employer(s)) 2022. This is an open access article under the terms of the Creative Commons Attribution License (CC-BY 4.0), which permits unrestricted use, distribution and reproduction in any medium, provided the original work is properly cited.},
author = {Ruddle, R. A. and Adnan, M. and Hall, M.},
url = {https://eprints.whiterose.ac.uk/id/eprint/212281/},
issn = {2044-6055},
abstract = {Objectives Missing data is the most common data quality issue in electronic health records (EHRs). Missing data checks implemented in common analytical software are typically limited to counting the number of missing values in individual fields, but researchers and organisations also need to understand multifield missing data patterns to better inform advanced missing data strategies for which counts or numerical summaries are poorly suited. This study shows how set-based visualisation enables multifield missing data patterns to be discovered and investigated.
Design Development and evaluation of interactive set visualisation techniques to find patterns of missing data and generate actionable insights. The visualisations comprised easily interpretable bar charts for sets, heatmaps for set intersections and histograms for distributions of both sets and intersections.
Setting and participants Anonymised admitted patient care health records for National Health Service (NHS) hospitals and independent sector providers in England. The visualisation and data mining software was run over 16 million records and 86 fields in the dataset.
Results The dataset contained 960 million missing values. Set visualisation bar charts showed how those values were distributed across the fields, including several fields that, unexpectedly, were not complete. Set intersection heatmaps revealed unexpected gaps in diagnosis, operation and date fields because diagnosis and operation fields were not filled up sequentially and some operations did not have corresponding dates. Information gain ratio and entropy calculations allowed us to identify the origin of each unexpected pattern, in terms of the values of other fields.
Conclusions Our findings show how set visualisation reveals important insights about multifield missing data patterns in large EHR datasets. The study revealed both rare and widespread data quality issues that were previously unknown, and allowed a particular part of a specific hospital to be pinpointed as the origin of rare issues that NHS Digital did not know existed.}
}
Remote collaborative work has become pervasive in many settings, ranging from engineering to medical professions. Users are immersed in virtual environments and communicate through life-sized avatars that enable face-to-face collaboration. Within this context, users often collaboratively view and interact with virtual 3D models, for example to assist in the design of new devices such as customized prosthetics, vehicles or buildings. Discussing such shared 3D content face-to-face, however, has a variety of challenges such as ambiguities, occlusions, and different viewpoints that all decrease mutual awareness, which in turn leads to decreased task performance and increased errors. To address this challenge, we introduce MAGIC, a novel approach for understanding pointing gestures in a face-to-face shared 3D space, improving mutual understanding and awareness. Our approach distorts the remote user's gestures to correctly reflect them in the local user's reference space when face-to-face. To measure what two users perceive in common when using pointing gestures in a shared 3D space, we introduce a novel metric called pointing agreement. Results from a user study suggest that MAGIC significantly improves pointing agreement in face-to-face collaboration settings, improving co-presence and awareness of interactions performed in the shared space. We believe that MAGIC improves remote collaboration by enabling simpler communication mechanisms and better mutual awareness.
@misc{wrro201242,
month = {May},
journal = {Proceedings of the 2023 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
pages = {438--448},
publisher = {IEEE},
title = {MAGIC: Manipulating Avatars and Gestures to Improve Remote Collaboration},
note = {{\copyright} 2023 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
booktitle = {2023 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
doi = {10.1109/vr55154.2023.00059},
year = {2023},
abstract = {Remote collaborative work has become pervasive in many settings, ranging from engineering to medical professions. Users are immersed in virtual environments and communicate through life-sized avatars that enable face-to-face collaboration. Within this context, users often collaboratively view and interact with virtual 3D models, for example to assist in the design of new devices such as customized prosthetics, vehicles or buildings. Discussing such shared 3D content face-to-face, however, has a variety of challenges such as ambiguities, occlusions, and different viewpoints that all decrease mutual awareness, which in turn leads to decreased task performance and increased errors. To address this challenge, we introduce MAGIC, a novel approach for understanding pointing gestures in a face-to-face shared 3D space, improving mutual understanding and awareness. Our approach distorts the remote user's gestures to correctly reflect them in the local user's reference space when face-to-face. To measure what two users perceive in common when using pointing gestures in a shared 3D space, we introduce a novel metric called pointing agreement. Results from a user study suggest that MAGIC significantly improves pointing agreement in face-to-face collaboration settings, improving co-presence and awareness of interactions performed in the shared space. We believe that MAGIC improves remote collaboration by enabling simpler communication mechanisms and better mutual awareness.},
issn = {2642-5254},
keywords = {Human-centered computing, Collaborative and social computing, Collaborative and social computing theory, concepts and paradigms, Computer supported cooperative work},
url = {http://dx.doi.org/10.1109/vr55154.2023.00059},
author = {Fidalgo, C. G. and Sousa, M. and Mendes, D. and Dos Anjos, R. K. and Medeiros, D. and Singh, K. and Jorge, J.}
}
Highly complex and dense models of 3D objects have recently become indispensable in digital industries. Mesh decimation then plays a crucial role in the production pipeline to efficiently obtain visually convincing yet compact expressions of complex meshes. However, the current pipeline typically does not allow artists to control the decimation process beyond setting a simplification rate. Thus a preferred approach in production settings splits the process into a first pass of saliency detection that highlights areas of greater detail, allowing artists to iterate until satisfied before simplifying the model. We propose a novel, efficient multi-scale method to compute mesh saliency at coarse and finer scales, based on fast mesh entropy of local surface measurements. Unlike previous approaches, we ensure a robust and straightforward calculation of mesh saliency even for densely tessellated models with millions of polygons. Moreover, we introduce a new adaptive subsampling and interpolation algorithm for saliency estimation. Our implementation achieves speedups of up to three orders of magnitude over prior approaches. Experimental results showcase its resilience to problem scenarios and show that it efficiently scales up to process multi-million-vertex meshes. Our evaluation with artists in the entertainment industry also demonstrates its applicability to real use-case scenarios.
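The core measure named in the abstract, entropy of local surface measurements, can be sketched in a few lines: histogram a measurement (for example curvature) over a vertex neighbourhood and take the Shannon entropy, so that detailed regions score high. Neighbourhood lookup and the multi-scale loop are simplified away, and the data is an illustrative assumption.

import numpy as np

def local_entropy(measurements, bins=16):
    hist, _ = np.histogram(measurements, bins=bins)
    p = hist[hist > 0] / hist.sum()        # empirical distribution, zero bins dropped
    return float(-(p * np.log2(p)).sum())  # Shannon entropy in bits

curvatures = np.random.default_rng(2).normal(size=200)  # stand-in measurement
print(local_entropy(curvatures))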
@article{wrro195788,
year = {2023},
doi = {10.1016/j.cag.2023.01.012},
note = {{\copyright} 2023 The Author(s). Published by Elsevier Ltd. This is an open access article under the CC BY license (http://creativecommons.org/licenses/by/4.0/)},
title = {Saliency detection for large-scale mesh decimation},
publisher = {Elsevier},
pages = {63--76},
journal = {Computers and Graphics},
month = {April},
volume = {111},
url = {http://dx.doi.org/10.1016/j.cag.2023.01.012},
author = {Kuffner dos Anjos, R. and Roberts, R. A. and Allen, B. and Jorge, J. and Anjyo, K.},
abstract = {Highly complex and dense models of 3D objects have recently become indispensable in digital industries. Mesh decimation then plays a crucial role in the production pipeline to efficiently obtain visually convincing yet compact expressions of complex meshes. However, the current pipeline typically does not allow artists to control the decimation process beyond setting a simplification rate. Thus a preferred approach in production settings splits the process into a first pass of saliency detection that highlights areas of greater detail, allowing artists to iterate until satisfied before simplifying the model. We propose a novel, efficient multi-scale method to compute mesh saliency at coarse and finer scales, based on fast mesh entropy of local surface measurements. Unlike previous approaches, we ensure a robust and straightforward calculation of mesh saliency even for densely tessellated models with millions of polygons. Moreover, we introduce a new adaptive subsampling and interpolation algorithm for saliency estimation. Our implementation achieves speedups of up to three orders of magnitude over prior approaches. Experimental results showcase its resilience to problem scenarios and show that it efficiently scales up to process multi-million-vertex meshes. Our evaluation with artists in the entertainment industry also demonstrates its applicability to real use-case scenarios.},
keywords = {Geometric processing; Mesh saliency; Mesh decimation},
issn = {0097-8493}
}
Previous work has identified more than 100 distinct characteristics of data quality, most of which are aspects of completeness, accuracy and consistency. Other work has developed new techniques for visualizing data quality, but there is a lack of research into how users visualize data quality issues with existing, well-known techniques. We investigated how 166 participants identified and illustrated data quality issues that occurred in a 54-file, longitudinal collection of open data. The issues that participants identified spanned 27 different characteristics, nine of which do not appear in existing data quality taxonomies. Participants adopted nine visualization and tabular methods to illustrate the issues, using the methods in five ways (quantify; alert; examples; serendipitous discovery; explain). The variety of serendipitous discoveries was noteworthy, as was how rarely participants used visualization to illustrate completeness and consistency, compared with accuracy. We conclude by presenting a 106-item data quality taxonomy that combines seven previous works with our findings.
@misc{wrro212184,
month = {February},
journal = {Proceedings of the 18th International Joint Conference on Computer Vision, Imaging and Computer Graphics Theory and Applications},
volume = {3},
pages = {89--100},
publisher = {SCITEPRESS - Science and Technology Publications},
title = {Using Well-Known Techniques to Visualize Characteristics of Data Quality},
note = {{\copyright} 2023 by SCITEPRESS - Science and Technology Publications, Lda. Under CC license (CC BY-NC-ND 4.0)},
year = {2023},
booktitle = {14th International Conference on Information Visualization Theory and Applications},
doi = {10.5220/0011664300003417},
author = {Ruddle, R.},
url = {https://eprints.whiterose.ac.uk/id/eprint/212184/},
keywords = {Visualization; Data Quality; Data Science; Empirical Study},
issn = {2184-5921},
abstract = {Previous work has identified more than 100 distinct characteristics of data quality, most of which are aspects of completeness, accuracy and consistency. Other work has developed new techniques for visualizing data quality, but there is a lack of research into how users visualize data quality issues with existing, well-known techniques. We investigated how 166 participants identified and illustrated data quality issues that occurred in a 54-file, longitudinal collection of open data. The issues that participants identified spanned 27 different characteristics, nine of which do not appear in existing data quality taxonomies. Participants adopted nine visualization and tabular methods to illustrate the issues, using the methods in five ways (quantify; alert; examples; serendipitous discovery; explain). The variety of serendipitous discoveries was noteworthy, as was how rarely participants used visualization to illustrate completeness and consistency, compared with accuracy. We conclude by presenting a 106-item data quality taxonomy that combines seven previous works with our findings.}
}
The use of good-quality data to inform decision making is entirely dependent on robust processes to ensure it is fit for purpose. Such processes vary between organisations, and between those tasked with designing and following them. In this paper we report on a survey of 53 data analysts from many industry sectors, 24 of whom also participated in in-depth interviews, about computational and visual methods for characterizing data and investigating data quality. The paper makes contributions in two key areas. The first is to data science fundamentals, because our lists of data profiling tasks and visualization techniques are more comprehensive than those published elsewhere. The second concerns the application question "what does good profiling look like to those who routinely perform it?," which we answer by highlighting the diversity of profiling tasks, unusual practice and exemplars of visualization, and recommendations about formalizing processes and creating rulebooks.
@article{wrro197083,
title = {Tasks and Visualizations Used for Data Profiling: A Survey and Interview Study},
year = {2023},
publisher = {Institute of Electrical and Electronics Engineers},
doi = {10.1109/TVCG.2023.3234337},
note = {Published online
{\copyright} 2023, IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {January},
keywords = {Data visualization, Task analysis, Data integrity, Interviews, Visualization, Bars, Industries},
issn = {1077-2626},
abstract = {The use of good-quality data to inform decision making is entirely dependent on robust processes to ensure it is fit for purpose. Such processes vary between organisations, and between those tasked with designing and following them. In this paper we report on a survey of 53 data analysts from many industry sectors, 24 of whom also participated in in-depth interviews, about computational and visual methods for characterizing data and investigating data quality. The paper makes contributions in two key areas. The first is to data science fundamentals, because our lists of data profiling tasks and visualization techniques are more comprehensive than those published elsewhere. The second concerns the application question "what does good profiling look like to those who routinely perform it?," which we answer by highlighting the diversity of profiling tasks, unusual practice and exemplars of visualization, and recommendations about formalizing processes and creating rulebooks.},
author = {Ruddle, RA and Cheshire, J and Fernstad, SJ},
url = {https://eprints.whiterose.ac.uk/id/eprint/197083/}
}
Contour trees are a significant tool for data analysis as they capture both local and global variation. However, their utility has been limited by scalability, in particular for distributed computation and storage. We report a distributed data structure for storing the contour tree of a data set distributed on a cluster, based on a fan-in hierarchy, and an algorithm for computing it based on the boundary tree that represents only the superarcs of a contour tree that involve contours that cross boundaries between blocks. This allows us to limit the communication cost for contour tree computation to the complexity of the block boundaries rather than of the entire data set.
@misc{wrro190963,
month = {December},
journal = {2022 IEEE 12th Symposium on Large Data Analysis and Visualization (LDAV)},
title = {Distributed Hierarchical Contour Trees},
publisher = {IEEE},
note = {This item is protected by copyright. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works. Uploaded in accordance with the publisher's self-archiving policy.},
year = {2022},
booktitle = {2022 IEEE 12th Symposium on Large Data Analysis and Visualization (LDAV)},
doi = {10.1109/LDAV57265.2022.9966394},
issn = {2832-6512},
abstract = {Contour trees are a significant tool for data analysis as they capture both local and global variation. However, their utility has been limited by scalability, in particular for distributed computation and storage. We report a distributed data structure for storing the contour tree of a data set distributed on a cluster, based on a fan-in hierarchy, and an algorithm for computing it based on the boundary tree that represents only the superarcs of a contour tree that involve contours that cross boundaries between blocks. This allows us to limit the communication cost for contour tree computation to the complexity of the block boundaries rather than of the entire data set.},
author = {Carr, HA and R{\"u}bel, O and Weber, GH},
isbn = {978-1-6654-9156-3},
url = {https://eprints.whiterose.ac.uk/id/eprint/190963/}
}
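As a hedged illustration of the fan-in pattern, the Python sketch below merges blocks pairwise over log2(B) rounds, where each round exchanges only a small boundary summary rather than a full block. The set-union merge and superarc-id summaries are placeholder assumptions; the actual boundary-tree construction and contour-tree merging are far more involved.

def fan_in(blocks, merge):
    # Pairwise reduction: each round halves the number of blocks.
    while len(blocks) > 1:
        nxt = [merge(blocks[i], blocks[i + 1]) for i in range(0, len(blocks) - 1, 2)]
        if len(blocks) % 2:  # an odd block is carried into the next round
            nxt.append(blocks[-1])
        blocks = nxt
    return blocks[0]

# Toy usage: each "boundary summary" is a set of superarc ids crossing block boundaries.
summaries = [{1, 2}, {2, 3}, {3, 4}, {4, 5}]
print(fan_in(summaries, lambda a, b: a | b))  # {1, 2, 3, 4, 5}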
Background Structured Medication Reviews (SMRs) are intended to help deliver the NHS Long Term Plan for medicines optimisation in people living with multiple long-term conditions and polypharmacy. It is challenging to gather the information needed for these reviews due to poor integration of health records across providers and there is little guidance on how to identify those patients most urgently requiring review. Objective To extract information from scattered clinical records on how health and medications change over time, apply interpretable artificial intelligence (AI) approaches to predict risks of poor outcomes and overlay this information on care records to inform SMRs. We will pilot this approach in primary care prescribing audit and feedback systems, and co-design future medicines optimisation decision support systems. Design DynAIRx will target potentially problematic polypharmacy in three key multimorbidity groups, namely, people with (a) mental and physical health problems, (b) four or more long-term conditions taking ten or more drugs and (c) older age and frailty. Structured clinical data will be drawn from integrated care records (general practice, hospital, and social care) covering an {$\sim$}11m population supplemented with Natural Language Processing (NLP) of unstructured clinical text. AI systems will be trained to identify patterns of conditions, medications, tests, and clinical contacts preceding adverse events in order to identify individuals who might benefit most from an SMR. Discussion By implementing and evaluating an AI-augmented visualisation of care records in an existing prescribing audit and feedback system we will create a learning system for medicines optimisation, co-designed throughout with end-users and patients.
@article{wrro197084,
title = {The DynAIRx Project Protocol: Artificial Intelligence for dynamic prescribing optimisation and care integration in multimorbidity},
publisher = {SAGE Publications},
pages = {1--14},
volume = {12},
journal = {Journal of Multimorbidity and Comorbidity},
month = {December},
doi = {10.1177/26335565221145493},
year = {2022},
note = {{\copyright} The Author(s) 2022. This is an open access article under the terms of the Creative Commons Attribution License (CC-BY 4.0), which permits unrestricted use, distribution and reproduction in any medium, provided the original work is properly cited.},
author = {Walker, LE and Abuzour, AS and Bollegala, D and Clegg, A and Gabbay, M and Griffiths, A and Kullu, C and Leeming, G and Mair, FS and Maskell, S and Relton, S and Ruddle, RA and Shantsila, E and Sperrin, M and Van Staa, T and Woodall, A and Buchan, I},
url = {https://eprints.whiterose.ac.uk/id/eprint/197084/},
keywords = {multimorbidity, polypharmacy, frailty, mental health, artificial intelligence, medicines optimisation},
issn = {2633-5565},
abstract = {Background
Structured Medication Reviews (SMRs) are intended to help deliver the NHS Long Term Plan for medicines optimisation in people living with multiple long-term conditions and polypharmacy. It is challenging to gather the information needed for these reviews due to poor integration of health records across providers and there is little guidance on how to identify those patients most urgently requiring review.
Objective
To extract information from scattered clinical records on how health and medications change over time, apply interpretable artificial intelligence (AI) approaches to predict risks of poor outcomes and overlay this information on care records to inform SMRs. We will pilot this approach in primary care prescribing audit and feedback systems, and co-design future medicines optimisation decision support systems.
Design
DynAIRx will target potentially problematic polypharmacy in three key multimorbidity groups, namely, people with (a) mental and physical health problems, (b) four or more long-term conditions taking ten or more drugs and (c) older age and frailty. Structured clinical data will be drawn from integrated care records (general practice, hospital, and social care) covering an {$\sim$}11m population supplemented with Natural Language Processing (NLP) of unstructured clinical text. AI systems will be trained to identify patterns of conditions, medications, tests, and clinical contacts preceding adverse events in order to identify individuals who might benefit most from an SMR.
Discussion
By implementing and evaluating an AI-augmented visualisation of care records in an existing prescribing audit and feedback system we will create a learning system for medicines optimisation, co-designed throughout with end-users and patients.}
}
Image warping, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, metameric image inpainting - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.
@article{wrro194571,
publisher = {IEEE},
title = {Metameric Inpainting for Image Warping},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {October},
pages = {1--12},
doi = {10.1109/tvcg.2022.3216712},
year = {2022},
note = {Published online
This item is protected by copyright. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works. Uploaded in accordance with the publisher's self-archiving policy.},
url = {https://eprints.whiterose.ac.uk/id/eprint/194571/},
author = {Kuffner dos Anjos, R and Walton, DR and Aksit, K and Friston, S and Swapp, D and Steed, A and Ritschel, T},
abstract = {Image warping, a per-pixel deformation of one image into another, is an essential component in immersive visual experiences such as virtual reality or augmented reality. The primary issue with image warping is disocclusions, where occluded (and hence unknown) parts of the input image would be required to compose the output image. We introduce a new image warping method, metameric image inpainting - an approach for hole-filling in real-time with foundations in human visual perception. Our method estimates image feature statistics of disoccluded regions from their neighbours. These statistics are inpainted and used to synthesise visuals in real-time that are less noticeable to study participants, particularly in peripheral vision. Our method offers speed improvements over standard structured image inpainting methods while improving realism over colour-based inpainting such as push-pull. Hence, our work paves the way towards future applications such as depth image-based rendering, 6-DoF 360 rendering, and remote render-streaming.},
issn = {1077-2626},
keywords = {Inpainting, warping, perception, real-time rendering}
}
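For contrast with the paper's statistics-based approach, here is a minimal numpy sketch of the colour-based push-pull baseline it compares against: average valid pixels into coarser levels (pull), then fill holes from those levels on the way back up (push). Power-of-two image dimensions are assumed for brevity.

import numpy as np

def pull(img, mask):
    # Average valid pixels in 2x2 blocks; returns a half-res image and validity mask.
    h2, w2 = mask.shape[0] // 2, mask.shape[1] // 2
    img4 = img[:h2 * 2, :w2 * 2].reshape(h2, 2, w2, 2, -1)
    mask4 = mask[:h2 * 2, :w2 * 2].reshape(h2, 2, w2, 2)
    count = mask4.sum(axis=(1, 3))
    avg = (img4 * mask4[..., None]).sum(axis=(1, 3)) / np.maximum(count, 1)[..., None]
    return avg, count > 0

def push_pull(img, mask):
    # Fill invalid pixels (mask == False) from coarser levels, recursively.
    if mask.all() or min(mask.shape) < 2:
        return img
    coarse, coarse_mask = pull(img, mask)
    coarse = push_pull(coarse, coarse_mask)
    up = coarse.repeat(2, axis=0).repeat(2, axis=1)  # nearest-neighbour upsample
    out = img.copy()
    out[~mask] = up[~mask]
    return out

# Toy usage: randomly "disocclude" 20% of a 64x64 RGB image, then fill the holes.
rng = np.random.default_rng(1)
image = rng.random((64, 64, 3))
valid = rng.random((64, 64)) > 0.2
print(np.isfinite(push_pull(image, valid)).all())  # True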
Screen-space ambient occlusion (SSAO) shows high efficiency and is widely used in real-time 3D applications. However, using SSAO algorithms in stereo rendering can lead to inconsistencies due to the differences in the screen-space information captured by the left and right eye. This will affect the perception of the scene and may be a source of viewer discomfort. In this paper, we show that the raw obscurance estimation part and subsequent filtering are both sources of inconsistencies. We developed a screen-space method involving both views in conjunction, leading to a stereo-aware raw obscurance estimation method and a stereo-aware bilateral filter. The results show that our method reduces stereo inconsistencies to a level comparable to geometry-based AO solutions, while maintaining the performance benefits of a screen-space approach.
@article{wrro187713,
note = {{\copyright} 2022 Copyright held by the owner/author(s). This is an open access article under the terms of the Creative Commons Attribution-ShareAlike 4.0 International (CC BY-SA 4.0)},
number = {1},
year = {2022},
doi = {10.1145/3522614},
author = {Shi, P and Billeter, M and Eisemann, E},
month = {May},
journal = {Proceedings of the ACM on computer graphics and interactive techniques},
volume = {5},
title = {Stereo-consistent screen-space ambient occlusion},
publisher = {Association for Computing Machinery (ACM)},
abstract = {Screen-space ambient occlusion (SSAO) shows high efficiency and is widely used in real-time 3D applications. However, using SSAO algorithms in stereo rendering can lead to inconsistencies due to the differences in the screen-space information captured by the left and right eye. This will affect the perception of the scene and may be a source of viewer discomfort. In this paper, we show that the raw obscurance estimation part and subsequent filtering are both sources of inconsistencies. We developed a screen-space method involving both views in conjunction, leading to a stereo-aware raw obscurance estimation method and a stereo-aware bilateral filter. The results show that our method reduces stereo inconsistencies to a level comparable to geometry-based AO solutions, while maintaining the performance benefits of a screen-space approach.},
keywords = {screen-space ambient occlusion; stereo consistency; VR},
url = {https://eprints.whiterose.ac.uk/id/eprint/187713/}
}
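To ground the term "raw obscurance estimation", the numpy sketch below computes a toy single-view estimate: the fraction of nearby depth samples that occlude the centre pixel. Sample count, radius, and bias are assumptions; a stereo-aware variant in the spirit of the paper would estimate both views in conjunction and apply a stereo-aware bilateral filter, neither of which is shown here.

import numpy as np

def raw_obscurance(depth, n_samples=16, radius=4, bias=0.01, seed=0):
    # Fraction of offset samples whose depth is closer to the camera than the
    # centre pixel by more than `bias` (a crude stand-in for real SSAO kernels).
    rng = np.random.default_rng(seed)
    h, w = depth.shape
    ys, xs = np.mgrid[0:h, 0:w]
    occ = np.zeros_like(depth)
    for _ in range(n_samples):
        dy, dx = rng.integers(-radius, radius + 1, size=2)
        sy = np.clip(ys + dy, 0, h - 1)
        sx = np.clip(xs + dx, 0, w - 1)
        occ += depth[sy, sx] < depth - bias
    return occ / n_samples

# Toy usage: a raised square darkens pixels around its silhouette.
depth = np.ones((32, 32))
depth[8:24, 8:24] = 0.5
print(raw_obscurance(depth).max())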
Background National audits aim to reduce variations in quality by stimulating quality improvement. However, varying provider engagement with audit data means that this is not being realised. Aim The aim of the study was to develop and evaluate a quality dashboard (i.e. QualDash) to support clinical teams' and managers' use of national audit data. Design The study was a realist evaluation and biography of artefacts study. Setting The study involved five NHS acute trusts. Methods and results In phase 1, we developed a theory of national audits through interviews. Data use was supported by data access, audit staff skilled to produce data visualisations, data timeliness and quality, and the importance of perceived metrics. Data were mainly used by clinical teams. Organisational-level staff questioned the legitimacy of national audits. In phase 2, QualDash was co-designed and the QualDash theory was developed. QualDash provides interactive customisable visualisations to enable the exploration of relationships between variables. Locating QualDash on site servers gave users control of data upload frequency. In phase 3, we developed an adoption strategy through focus groups. 'Champions', awareness-raising through e-bulletins and demonstrations, and quick reference tools were agreed. In phase 4, we tested the QualDash theory using a mixed-methods evaluation. Constraints on use were metric configurations that did not match users' expectations, affecting champions' willingness to promote QualDash, and limited computing resources. Easy customisability supported use. The greatest use was where data use was previously constrained. In these contexts, report preparation time was reduced and efforts to improve data quality were supported, although the interrupted time series analysis did not show improved data quality. Twenty-three questionnaires were returned, revealing positive perceptions of ease of use and usefulness. In phase 5, the feasibility of conducting a cluster randomised controlled trial of QualDash was assessed. Interviews were undertaken to understand how QualDash could be revised to support a region-wide Gold Command. Requirements included multiple real-time data sources and functionality to help to identify priorities. Conclusions Audits seeking to widen engagement may find the following strategies beneficial: involving a range of professional groups in choosing metrics; real-time reporting; presenting 'headline' metrics important to organisational-level staff; using routinely collected clinical data to populate data fields; and dashboards that help staff to explore and report audit data. Those designing dashboards may find it beneficial to include the following: 'at a glance' visualisation of key metrics; visualisations configured in line with existing visualisations that teams use, with clear labelling; functionality that supports the creation of reports and presentations; the ability to explore relationships between variables and drill down to look at subgroups; and low requirements for computing resources. Organisations introducing a dashboard may find the following strategies beneficial: clinical champion to promote use; testing with real data by audit staff; establishing routines for integrating use into work practices; involving audit staff in adoption activities; and allowing customisation. Limitations The COVID-19 pandemic stopped phase 4 data collection, limiting our ability to further test and refine the QualDash theory. 
Questionnaire results should be treated with caution because of the small, possibly biased, sample. Control sites for the interrupted time series analysis were not possible because of research and development delays. One intervention site did not submit data. Limited uptake meant that assessing the impact on more measures was not appropriate. Future work The extent to which national audit dashboards are used and the strategies national audits use to encourage uptake, a realist review of the impact of dashboards, and rigorous evaluations of the impact of dashboards and the effectiveness of adoption strategies should be explored. Study registration This study is registered as ISRCTN18289782. Funding This project was funded by the National Institute for Health and Care Research (NIHR) Health and Social Care Delivery Research programme and will be published in full in Health and Social Care Delivery Research; Vol. 10, No. 12. See the NIHR Journals Library website for further project information.
@article{wrro188565,
number = {12},
note = {{\copyright} 2022 Randell et al. This is an open access article under the terms of the Creative Commons Attribution 4.0 International License (CC BY 4.0) (https://creativecommons.org/licenses/by/4.0/)},
year = {2022},
doi = {10.3310/wbkw4927},
month = {May},
journal = {Health and Social Care Delivery Research},
volume = {10},
publisher = {NIHR Journals Library},
title = {Design and evaluation of an interactive quality dashboard for national clinical audit data: a realist evaluation},
author = {Randell, R and Alvarado, N and Elshehaly, M and McVey, L and West, RM and Doherty, P and Dowding, D and Farrin, AJ and Feltbower, RG and Gale, CP and Greenhalgh, J and Lake, J and Mamas, M and Walwyn, R and Ruddle, RA},
url = {https://eprints.whiterose.ac.uk/id/eprint/188565/},
issn = {2755-0060},
abstract = {Background
National audits aim to reduce variations in quality by stimulating quality improvement. However, varying provider engagement with audit data means that this is not being realised.
Aim
The aim of the study was to develop and evaluate a quality dashboard (i.e. QualDash) to support clinical teams' and managers' use of national audit data.
Design
The study was a realist evaluation and biography of artefacts study.
Setting
The study involved five NHS acute trusts.
Methods and results
In phase 1, we developed a theory of national audits through interviews. Data use was supported by data access, audit staff skilled to produce data visualisations, data timeliness and quality, and the importance of perceived metrics. Data were mainly used by clinical teams. Organisational-level staff questioned the legitimacy of national audits. In phase 2, QualDash was co-designed and the QualDash theory was developed. QualDash provides interactive customisable visualisations to enable the exploration of relationships between variables. Locating QualDash on site servers gave users control of data upload frequency. In phase 3, we developed an adoption strategy through focus groups. 'Champions', awareness-raising through e-bulletins and demonstrations, and quick reference tools were agreed. In phase 4, we tested the QualDash theory using a mixed-methods evaluation. Constraints on use were metric configurations that did not match users' expectations, affecting champions' willingness to promote QualDash, and limited computing resources. Easy customisability supported use. The greatest use was where data use was previously constrained. In these contexts, report preparation time was reduced and efforts to improve data quality were supported, although the interrupted time series analysis did not show improved data quality. Twenty-three questionnaires were returned, revealing positive perceptions of ease of use and usefulness. In phase 5, the feasibility of conducting a cluster randomised controlled trial of QualDash was assessed. Interviews were undertaken to understand how QualDash could be revised to support a region-wide Gold Command. Requirements included multiple real-time data sources and functionality to help to identify priorities.
Conclusions
Audits seeking to widen engagement may find the following strategies beneficial: involving a range of professional groups in choosing metrics; real-time reporting; presenting 'headline' metrics important to organisational-level staff; using routinely collected clinical data to populate data fields; and dashboards that help staff to explore and report audit data. Those designing dashboards may find it beneficial to include the following: 'at a glance' visualisation of key metrics; visualisations configured in line with existing visualisations that teams use, with clear labelling; functionality that supports the creation of reports and presentations; the ability to explore relationships between variables and drill down to look at subgroups; and low requirements for computing resources. Organisations introducing a dashboard may find the following strategies beneficial: clinical champion to promote use; testing with real data by audit staff; establishing routines for integrating use into work practices; involving audit staff in adoption activities; and allowing customisation.
Limitations
The COVID-19 pandemic stopped phase 4 data collection, limiting our ability to further test and refine the QualDash theory. Questionnaire results should be treated with caution because of the small, possibly biased, sample. Control sites for the interrupted time series analysis were not possible because of research and development delays. One intervention site did not submit data. Limited uptake meant that assessing the impact on more measures was not appropriate.
Future work
The extent to which national audit dashboards are used and the strategies national audits use to encourage uptake, a realist review of the impact of dashboards, and rigorous evaluations of the impact of dashboards and the effectiveness of adoption strategies should be explored.
Study registration
This study is registered as ISRCTN18289782.
Funding
This project was funded by the National Institute for Health and Care Research (NIHR) Health and Social Care Delivery Research programme and will be published in full in Health and Social Care Delivery Research; Vol. 10, No. 12. See the NIHR Journals Library website for further project information.}
}
Computer-Generated Holography (CGH) offers the potential for genuine, high-quality three-dimensional visuals. However, fulfilling this potential remains a practical challenge due to computational complexity and visual quality issues. We propose a new CGH method that exploits gaze-contingency and perceptual graphics to accelerate the development of practical holographic display systems. Firstly, our method infers the user's focal depth and generates images only at their focus plane without using any moving parts. Second, the images displayed are metamers; in the user's peripheral vision, they need only be statistically correct and blend with the fovea seamlessly. Unlike previous methods, our method prioritises and improves foveal visual quality without causing perceptually visible distortions at the periphery. To enable our method, we introduce a novel metameric loss function that robustly compares the statistics of two given images for a known gaze location. In parallel, we implement a model representing the relation between holograms and their image reconstructions. We couple our differentiable loss function and model to metameric varifocal holograms using a stochastic gradient descent solver. We evaluate our method with an actual proof-of-concept holographic display, and we show that our CGH method leads to practical and perceptually three-dimensional image reconstructions.
@misc{wrro187760,
doi = {10.1109/vr51125.2022.00096},
booktitle = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
year = {2022},
journal = {2022 IEEE Conference on Virtual Reality and 3D User Interfaces (VR)},
month = {April},
pages = {746--755},
publisher = {IEEE},
title = {Metameric Varifocal Holograms},
issn = {2642-5246},
url = {https://eprints.whiterose.ac.uk/id/eprint/187760/},
abstract = {Computer-Generated Holography (CGH) offers the potential for genuine, high-quality three-dimensional visuals. However, fulfilling this potential remains a practical challenge due to computational complexity and visual quality issues. We propose a new CGH method that exploits gaze-contingency and perceptual graphics to accelerate the development of practical holographic display systems. Firstly, our method infers the user's focal depth and generates images only at their focus plane without using any moving parts. Second, the images displayed are metamers; in the user's peripheral vision, they need only be statistically correct and blend with the fovea seamlessly. Unlike previous methods, our method prioritises and improves foveal visual quality without causing perceptually visible distortions at the periphery. To enable our method, we introduce a novel metameric loss function that robustly compares the statistics of two given images for a known gaze location. In parallel, we implement a model representing the relation between holograms and their image reconstructions. We couple our differentiable loss function and model to metameric varifocal holograms using a stochastic gradient descent solver. We evaluate our method with an actual proof-of-concept holographic display, and we show that our CGH method leads to practical and perceptually three-dimensional image reconstructions.},
keywords = {Computer-Generated Holography, Foveated Rendering, Metamerisation, Varifocal Near-Eye Displays, Virtual Reality, Augmented Reality},
isbn = {978-1-6654-9618-6},
author = {Walton, DR and Kavakli, K and Dos Anjos, RK and Swapp, D and Weyrich, T and Urey, H and Steed, A and Ritschel, T and Aksit, K}
}
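A hedged numpy sketch of what a metameric loss can look like: compare the fovea pixel-wise, but compare only pooled statistics (here simply block means and variances) at coarser scales. The fovea radius, pooling sizes, and choice of statistics are assumptions for illustration; the paper's loss uses richer statistics tied to a known gaze location, minimised by stochastic gradient descent through a differentiable hologram model.

import numpy as np

def pooled_stats(img, pool):
    # Block mean and variance over non-overlapping pool x pool windows.
    hp, wp = img.shape[0] // pool, img.shape[1] // pool
    blocks = img[:hp * pool, :wp * pool].reshape(hp, pool, wp, pool)
    return blocks.mean(axis=(1, 3)), blocks.var(axis=(1, 3))

def metameric_loss(a, b, gaze, fovea_radius=0.15, pools=(2, 4, 8)):
    ys, xs = np.mgrid[0:a.shape[0], 0:a.shape[1]]
    ecc = np.hypot(ys - gaze[0], xs - gaze[1]) / max(a.shape)
    fovea = ecc < fovea_radius
    loss = np.mean((a[fovea] - b[fovea]) ** 2)  # exact match demanded at the fovea
    for pool in pools:  # only statistics matter elsewhere
        (ma, va), (mb, vb) = pooled_stats(a, pool), pooled_stats(b, pool)
        loss += np.mean((ma - mb) ** 2) + np.mean((va - vb) ** 2)
    return loss

# Toy usage: two noise images agree statistically but not pixel-wise.
rng = np.random.default_rng(2)
img_a, img_b = rng.random((64, 64)), rng.random((64, 64))
print(metameric_loss(img_a, img_b, gaze=(32, 32)))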
Computer-Generated Holography (CGH) promises to deliver genuine, high-quality visuals at any depth. We argue that combining CGH and perceptually guided graphics can soon lead to practical holographic display systems that deliver perceptually realistic images. We propose a new CGH method called metameric varifocal holograms. Our CGH method generates images only at a user's focus plane while displayed images are statistically correct and indistinguishable from actual targets across peripheral vision (metamers). Thus, a user observing our holograms is set to perceive a high quality visual at their gaze location. At the same time, the integrity of the image follows a statistically correct trend in the remaining peripheral parts. We demonstrate our differentiable CGH optimization pipeline on modern GPUs, and we support our findings with a display prototype. Our method will pave the way towards realistic visuals free from classical CGH problems, such as speckle noise or poor visual quality.
@misc{wrro185953,
volume = {12024},
month = {March},
journal = {Advances in Display Technologies XII},
title = {Perceptually guided computer-generated holography},
publisher = {SPIE},
note = {{\copyright}2022 Society of Photo-Optical Instrumentation Engineers (SPIE). One print or electronic copy may be made for personal use only. Systematic reproduction and distribution, duplication of any material in this publication for a fee or for commercial purposes, and modification of the contents of the publication are prohibited.},
doi = {10.1117/12.2610251},
booktitle = {SPIE OPTO, 2022},
year = {2022},
author = {Aksit, K and Kavakli, K and Walton, D and Steed, A and Urey, H and Kuffner Dos Anjos, R and Friston, S and Weyrich, T and Ritschel, T},
url = {https://eprints.whiterose.ac.uk/id/eprint/185953/},
issn = {0277-786X},
abstract = {Computer-Generated Holography (CGH) promises to deliver genuine, high-quality visuals at any depth. We argue that combining CGH and perceptually guided graphics can soon lead to practical holographic display systems that deliver perceptually realistic images. We propose a new CGH method called metameric varifocal holograms. Our CGH method generates images only at a user's focus plane while displayed images are statistically correct and indistinguishable from actual targets across peripheral vision (metamers). Thus, a user observing our holograms is set to perceive a high quality visual at their gaze location. At the same time, the integrity of the image follows a statistically correct trend in the remaining peripheral parts. We demonstrate our differentiable CGH optimization pipeline on modern GPUs, and we support our findings with a display prototype. Our method will pave the way towards realistic visuals free from classical CGH problems, such as speckle noise or poor visual quality.}
}
Benefiting from the excellent performance of Siamese-based trackers, huge progress on 2D visual tracking has been achieved. However, 3D visual tracking is still under-explored. Inspired by the idea of Hough voting in 3D object detection, in this paper, we propose a Multi-level Voting Siamese Network (MLVSNet) for 3D visual tracking from outdoor point cloud sequences. To deal with sparsity in outdoor 3D point clouds, we propose to perform Hough voting on multi-level features to get more vote centers and retain more useful information, instead of voting only on the final level feature as in previous methods. We also design an efficient and lightweight Target-Guided Attention (TGA) module to transfer the target information and highlight the target points in the search area. Moreover, we propose a Vote-cluster Feature Enhancement (VFE) module to exploit the relationships between different vote clusters. Extensive experiments on the 3D tracking benchmark of KITTI dataset demonstrate that our MLVSNet outperforms state-of-the-art methods with significant margins. Code will be available at https://github.com/CodeWZT/MLVSNet.
@misc{wrro233703,
pages = {3081--3090},
month = {February},
journal = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
title = {MLVSNet: Multi-level Voting Siamese Network for 3D Visual Tracking},
publisher = {IEEE},
note = {{\copyright} 2021 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
year = {2022},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
doi = {10.1109/iccv48922.2021.00309},
url = {https://ieeexplore.ieee.org/document/9710975},
issn = {1550-5499},
isbn = {978-1-6654-2813-2},
author = {Wang, Z. and Xie, Q. and Lai, Y.-K. and Wu, J. and Long, K. and Wang, J.},
abstract = {Benefiting from the excellent performance of Siamese-based trackers, huge progress on 2D visual tracking has been achieved. However, 3D visual tracking is still under-explored. Inspired by the idea of Hough voting in 3D object detection, in this paper, we propose a Multi-level Voting Siamese Network (MLVSNet) for 3D visual tracking from outdoor point cloud sequences. To deal with sparsity in outdoor 3D point clouds, we propose to perform Hough voting on multi-level features to get more vote centers and retain more useful information, instead of voting only on the final level feature as in previous methods. We also design an efficient and lightweight Target-Guided Attention (TGA) module to transfer the target information and highlight the target points in the search area. Moreover, we propose a Vote-cluster Feature Enhancement (VFE) module to exploit the relationships between different vote clusters. Extensive experiments on the 3D tracking benchmark of KITTI dataset demonstrate that our MLVSNet outperforms state-of-the-art methods with significant margins. Code will be available at https://github.com/CodeWZT/MLVSNet.},
keywords = {Detection and localization in 2D and 3D, Motion and tracking}
}
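The Hough-voting step at the core of this family of trackers can be illustrated with a short numpy sketch: each seed point is shifted by a predicted offset toward the object centre, and the resulting votes are aggregated with a mean-shift-like pass. The synthetic offset regression and the simple aggregation are toy assumptions; the paper's multi-level features, TGA, and VFE modules are not reproduced.

import numpy as np

def aggregate_votes(votes, radius=0.5, iters=5):
    # Mean-shift-like refinement: move each vote toward the local vote density.
    centers = votes.copy()
    for _ in range(iters):
        for i, c in enumerate(centers):
            near = votes[np.linalg.norm(votes - c, axis=1) < radius]
            centers[i] = near.mean(axis=0)
    return centers

# Toy usage: seeds on an object predict noisy offsets toward its centre.
rng = np.random.default_rng(3)
true_center = np.array([1.0, 2.0, 0.5])
seeds = true_center + rng.normal(scale=1.0, size=(64, 3))
offsets = (true_center - seeds) + rng.normal(scale=0.05, size=(64, 3))
votes = seeds + offsets
print(aggregate_votes(votes).mean(axis=0).round(2))  # close to true_center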
Hough voting, as has been demonstrated in VoteNet, is effective for 3D object detection, where voting is a key step. In this paper, we propose a novel VoteNet-based 3D detector with vote enhancement to improve the detection accuracy in cluttered indoor scenes. It addresses the limitations of current voting schemes, i.e., votes from neighboring objects and background have significant negative impacts. Before voting, we replace the classic MLP with the proposed Attentive MLP (AMLP) in the backbone network to get better feature description of seed points. During voting, we design a new vote attraction loss (VALoss) to enforce vote centers to locate closely and compactly to the corresponding object centers. After voting, we then devise a vote weighting module to integrate the foreground/background prediction into the vote aggregation process to enhance the capability of the original VoteNet to handle noise from background voting. The three proposed strategies all contribute to more effective voting and improved performance, resulting in a novel 3D object detector, termed VENet. Experiments show that our method outperforms state-of-the-art methods on benchmark datasets. Ablation studies demonstrate the effectiveness of the proposed components.
@misc{wrro233702,
journal = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
month = {February},
pages = {3692--3701},
publisher = {IEEE},
title = {VENet: Voting Enhancement Network for 3D Object Detection},
note = {{\copyright} 2021 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
booktitle = {2021 IEEE/CVF International Conference on Computer Vision (ICCV)},
doi = {10.1109/iccv48922.2021.00369},
year = {2022},
url = {https://ieeexplore.ieee.org/document/9710665},
issn = {1550-5499},
isbn = {978-1-6654-2813-2},
author = {Xie, Q. and Lai, Y.-K. and Wu, J. and Wang, Z. and Lu, D. and Wei, M. and Wang, J.},
abstract = {Hough voting, as has been demonstrated in VoteNet, is effective for 3D object detection, where voting is a key step. In this paper, we propose a novel VoteNet-based 3D detector with vote enhancement to improve the detection accuracy in cluttered indoor scenes. It addresses the limitations of current voting schemes, i.e., votes from neighboring objects and background have significant negative impacts. Before voting, we replace the classic MLP with the proposed Attentive MLP (AMLP) in the backbone network to get better feature description of seed points. During voting, we design a new vote attraction loss (VALoss) to enforce vote centers to locate closely and compactly to the corresponding object centers. After voting, we then devise a vote weighting module to integrate the foreground/background prediction into the vote aggregation process to enhance the capability of the original VoteNet to handle noise from background voting. The three proposed strategies all contribute to more effective voting and improved performance, resulting in a novel 3D object detector, termed VENet. Experiments show that our method outperforms state-of-the-art methods on benchmark datasets. Ablation studies demonstrate the effectiveness of the proposed components.},
keywords = {Detection and localization in 2D and 3D, Scene analysis and understanding, Vision for robotics and autonomous vehicles}
}
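A minimal sketch of the quantity a vote-attraction loss penalises: the distance between each vote and the centre of the object its seed belongs to, so that minimising it pulls votes into compact clusters. The L1 metric and the hard seed-to-object assignment are assumptions; the published VALoss is defined over network outputs during training.

import numpy as np

def vote_attraction_loss(votes, object_centers, assignment):
    # Mean L1 distance from each vote to its assigned object centre.
    target = object_centers[assignment]
    return np.abs(votes - target).sum(axis=1).mean()

# Toy usage: two votes, each assigned to a nearby object centre.
votes = np.array([[0.1, 0.0, 0.0], [1.9, 1.0, 1.1]])
centers = np.array([[0.0, 0.0, 0.0], [2.0, 1.0, 1.0]])
print(vote_attraction_loss(votes, centers, np.array([0, 1])))  # small = compact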
In this paper, we present a system that allows a user with a head-mounted display (HMD) to communicate and collaborate with spectators outside of the headset. We evaluate its impact on task performance, immersion, and collaborative interaction. Our solution targets scenarios like live presentations or multi-user collaborative systems, where it is not convenient to develop a VR multiplayer experience and supply each user (and spectator) with an HMD. The spectator views the virtual world on a large-scale tiled video wall and is given the ability to control the orientation of their own virtual camera. This allows spectators to stay focused on the immersed user's point of view or freely look around the environment. To improve collaboration between users, we implemented a pointing system where a spectator can point at objects on the screen, which maps an indicator directly onto the objects in the virtual world. We conducted a user study to investigate the influence of rotational camera decoupling and pointing gestures in the context of HMD-immersed and non-immersed users utilizing a large-scale display. Our results indicate that camera decoupling and pointing positively impact collaboration. A decoupled view is preferable in situations where both users need to indicate objects of interest in the scene, such as presentations and joint-task scenarios, as such tasks require a shared reference space. A coupled view, on the other hand, is preferable in synchronous interactions such as remote-assistant scenarios.
@article{wrro194572,
publisher = {Association for Computing Machinery (ACM)},
title = {Spectator View: Enabling Asymmetric Interaction between HMD Wearers and Spectators with a Large Display},
volume = {5},
journal = {Proceedings of the ACM on Human-Computer Interaction},
month = {November},
pages = {1--17},
doi = {10.1145/3486951},
year = {2021},
number = {ISS},
note = {Copyright {\copyright} 2021 ACM. This is an author produced version of a paper published in Proceedings of the ACM on Human-Computer Interaction. Uploaded in accordance with the publisher's self-archiving policy.},
url = {https://eprints.whiterose.ac.uk/id/eprint/194572/},
author = {Welsford-Ackroyd, F and Chalmers, A and Kuffner dos Anjos, R and Medeiros, D and Kim, H and Rhee, T},
abstract = {In this paper, we present a system that allows a user with a head-mounted display (HMD) to communicate and collaborate with spectators outside of the headset. We evaluate its impact on task performance, immersion, and collaborative interaction. Our solution targets scenarios like live presentations or multi-user collaborative systems, where it is not convenient to develop a VR multiplayer experience and supply each user (and spectator) with an HMD. The spectator views the virtual world on a large-scale tiled video wall and is given the ability to control the orientation of their own virtual camera. This allows spectators to stay focused on the immersed user's point of view or freely look around the environment. To improve collaboration between users, we implemented a pointing system where a spectator can point at objects on the screen, which maps an indicator directly onto the objects in the virtual world. We conducted a user study to investigate the influence of rotational camera decoupling and pointing gestures in the context of HMD-immersed and non-immersed users utilizing a large-scale display. Our results indicate that camera decoupling and pointing positively impact collaboration. A decoupled view is preferable in situations where both users need to indicate objects of interest in the scene, such as presentations and joint-task scenarios, as such tasks require a shared reference space. A coupled view, on the other hand, is preferable in synchronous interactions such as remote-assistant scenarios.},
issn = {2573-0142}
}
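The pointing feature reduces to a screen-to-world mapping. As a hedged sketch of how such a mapping can work (the pinhole camera model and single-sphere scene are assumptions, not the paper's implementation): unproject the spectator's 2D screen point into a view ray, then intersect it with scene geometry to anchor the indicator.

import numpy as np

def screen_ray(px, py, width, height, fov_y, cam_pos, cam_rot):
    # World-space ray through pixel (px, py) for a pinhole camera looking down -z.
    aspect = width / height
    tan = np.tan(fov_y / 2)
    x = (2 * px / width - 1) * tan * aspect
    y = (1 - 2 * py / height) * tan
    d = cam_rot @ np.array([x, y, -1.0])
    return cam_pos, d / np.linalg.norm(d)

def hit_sphere(origin, direction, center, radius):
    # Nearest positive ray-sphere intersection, or None if the ray misses.
    oc = origin - center
    b = oc @ direction
    disc = b * b - (oc @ oc - radius * radius)
    if disc < 0:
        return None
    t = -b - np.sqrt(disc)
    return origin + t * direction if t > 0 else None

# Toy usage: the screen centre maps onto a sphere 5 units in front of the camera.
o, d = screen_ray(400, 300, 800, 600, np.radians(60), np.zeros(3), np.eye(3))
print(hit_sphere(o, d, center=np.array([0.0, 0.0, -5.0]), radius=1.0))  # [0. 0. -4.]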
Background: Dashboards can support data-driven quality improvements in health care. They visualize data in ways intended to ease cognitive load and support data comprehension, but how they are best integrated into working practices needs further investigation. Objective: This paper reports the findings of a realist evaluation of a web-based quality dashboard (QualDash) developed to support the use of national audit data in quality improvement. Methods: QualDash was co-designed with data users and installed in 8 clinical services (3 pediatric intensive care units and 5 cardiology services) across 5 health care organizations (sites A-E) in England between July and December 2019. Champions were identified to support adoption. Data to evaluate QualDash were collected between July 2019 and August 2021 and consisted of 148.5 hours of observations including hospital wards and clinical governance meetings, log files that captured the extent of use of QualDash over 12 months, and a questionnaire designed to assess the dashboard's perceived usefulness and ease of use. Guided by the principles of realist evaluation, data were analyzed to understand how, why, and in what circumstances QualDash supported the use of national audit data in quality improvement. Results: The observations revealed that variation across sites in the amount and type of resources available to support data use, alongside staff interactions with QualDash, shaped its use and impact. Sites resourced with skilled audit support staff and established reporting systems (sites A and C) continued to use existing processes to report data. A number of constraints influenced use of QualDash in these sites including that some dashboard metrics were not configured in line with user expectations and staff were not fully aware how QualDash could be used to facilitate their work. In less well-resourced services, QualDash automated parts of their reporting process, streamlining the work of audit support staff (site B), and, in some cases, highlighted issues with data completeness that the service worked to address (site E). Questionnaire responses received from 23 participants indicated that QualDash was perceived as useful and easy to use despite its variable use in practice. Conclusions: Web-based dashboards have the potential to support data-driven improvement, providing access to visualizations that can help users address key questions about care quality. Findings from this study point to ways in which dashboard design might be improved to optimize use and impact in different contexts; this includes using data meaningful to stakeholders in the co-design process and actively engaging staff knowledgeable about current data use and routines in the scrutiny of the dashboard metrics and functions. In addition, consideration should be given to the processes of data collection and upload that underpin the quality of the data visualized and consequently its potential to stimulate quality improvement. International Registered Report Identifier (IRRID): RR2-10.1136/bmjopen-2019-033208
@article{wrro181546,
note = {{\copyright}Natasha Alvarado, Lynn McVey, Mai Elshehaly, Joanne Greenhalgh, Dawn Dowding, Roy Ruddle, Chris P Gale, Mamas Mamas, Patrick Doherty, Robert West, Richard Feltbower, Rebecca Randell. This is an open access article under the terms of the Creative Commons Attribution 4.0 International (CC BY 4.0)},
number = {11},
doi = {10.2196/28854},
year = {2021},
volume = {23},
month = {November},
journal = {Journal of Medical Internet Research},
publisher = {JMIR Publications},
title = {Analysis of a Web-Based Dashboard to Support the Use of National Audit Data in Quality Improvement: Realist Evaluation},
abstract = {Background:
Dashboards can support data-driven quality improvements in health care. They visualize data in ways intended to ease cognitive load and support data comprehension, but how they are best integrated into working practices needs further investigation.
Objective:
This paper reports the findings of a realist evaluation of a web-based quality dashboard (QualDash) developed to support the use of national audit data in quality improvement.
Methods:
QualDash was co-designed with data users and installed in 8 clinical services (3 pediatric intensive care units and 5 cardiology services) across 5 health care organizations (sites A-E) in England between July and December 2019. Champions were identified to support adoption. Data to evaluate QualDash were collected between July 2019 and August 2021 and consisted of 148.5 hours of observations including hospital wards and clinical governance meetings, log files that captured the extent of use of QualDash over 12 months, and a questionnaire designed to assess the dashboard's perceived usefulness and ease of use. Guided by the principles of realist evaluation, data were analyzed to understand how, why, and in what circumstances QualDash supported the use of national audit data in quality improvement.
Results:
The observations revealed that variation across sites in the amount and type of resources available to support data use, alongside staff interactions with QualDash, shaped its use and impact. Sites resourced with skilled audit support staff and established reporting systems (sites A and C) continued to use existing processes to report data. A number of constraints influenced use of QualDash in these sites including that some dashboard metrics were not configured in line with user expectations and staff were not fully aware how QualDash could be used to facilitate their work. In less well-resourced services, QualDash automated parts of their reporting process, streamlining the work of audit support staff (site B), and, in some cases, highlighted issues with data completeness that the service worked to address (site E). Questionnaire responses received from 23 participants indicated that QualDash was perceived as useful and easy to use despite its variable use in practice.
Conclusions:
Web-based dashboards have the potential to support data-driven improvement, providing access to visualizations that can help users address key questions about care quality. Findings from this study point to ways in which dashboard design might be improved to optimize use and impact in different contexts; this includes using data meaningful to stakeholders in the co-design process and actively engaging staff knowledgeable about current data use and routines in the scrutiny of the dashboard metrics and functions. In addition, consideration should be given to the processes of data collection and upload that underpin the quality of the data visualized and consequently its potential to stimulate quality improvement.
International Registered Report Identifier (IRRID):
RR2-10.1136/bmjopen-2019-033208},
keywords = {data; QualDash; audit; dashboards; support; quality},
issn = {1438-8871},
url = {https://eprints.whiterose.ac.uk/id/eprint/181546/},
author = {Alvarado, N and McVey, L and Elshehaly, M and Greenhalgh, J and Dowding, D and Ruddle, R and Gale, CP and Mamas, M and Doherty, P and West, R and Feltbower, R and Randell, R}
}
t-distributed Stochastic Neighbour Embedding (t-SNE) has become a standard for exploratory data analysis, as it is capable of revealing clusters even in complex data while requiring minimal user input. While its run-time complexity limited it to small datasets in the past, recent efforts improved upon the expensive similarity computations and the previously quadratic minimization. Nevertheless, t-SNE still has high runtime and memory costs when operating on millions of points. We present a novel method for executing the t-SNE minimization. While our method overall retains a linear runtime complexity, we obtain a significant performance increase in the most expensive part of the minimization. We achieve a significant improvement without a noticeable decrease in accuracy even when targeting a 3D embedding. Our method constructs a pair of spatial hierarchies over the embedding, which are simultaneously traversed to approximate many N-body interactions at once. We demonstrate an efficient GPGPU implementation and evaluate its performance against state-of-the-art methods on a variety of datasets.
@article{wrro179727,
note = {{\copyright} 2021 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
doi = {10.1109/tvcg.2021.3114817},
year = {2021},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {September},
pages = {1--1},
author = {van de Ruit, M and Billeter, M and Eisemann, E},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
title = {An Efficient Dual-Hierarchy t-SNE Minimization},
abstract = {t-distributed Stochastic Neighbour Embedding (t-SNE) has become a standard for exploratory data analysis, as it is capable of revealing clusters even in complex data while requiring minimal user input. While its run-time complexity limited it to small datasets in the past, recent efforts improved upon the expensive similarity computations and the previously quadratic minimization. Nevertheless, t-SNE still has high runtime and memory costs when operating on millions of points. We present a novel method for executing the t-SNE minimization. While our method overall retains a linear runtime complexity, we obtain a significant performance increase in the most expensive part of the minimization. We achieve a significant improvement without a noticeable decrease in accuracy even when targeting a 3D embedding. Our method constructs a pair of spatial hierarchies over the embedding, which are simultaneously traversed to approximate many N-body interactions at once. We demonstrate an efficient GPGPU implementation and evaluate its performance against state-of-the-art methods on a variety of datasets.},
keywords = {High dimensional data, dimensionality reduction, parallel data structures, dual-hierarchy, GPGPU},
url = {https://eprints.whiterose.ac.uk/id/eprint/179727/}
}
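The core dual-hierarchy idea can be sketched independently of t-SNE: when two hierarchy nodes are well separated relative to their extents, a single centre-to-centre term stands in for all point-pair interactions between them; otherwise the traversal recurses into the larger node. The median-split tree, leaf size, and separation threshold below are illustrative assumptions; the paper's GPU-friendly hierarchies and t-SNE's repulsive kernel are not reproduced.

import numpy as np

class Node:
    def __init__(self, pts, leaf_size=4):
        self.pts, self.children = pts, []
        self.center = pts.mean(axis=0)
        self.extent = np.linalg.norm(pts - self.center, axis=1).max()
        if len(pts) > leaf_size:  # median split along the widest axis
            order = pts[:, pts.var(axis=0).argmax()].argsort()
            mid = len(pts) // 2
            self.children = [Node(pts[order[:mid]]), Node(pts[order[mid:]])]

def dual_traverse(a, b, theta=0.5):
    # Returns the number of interactions evaluated: one per approximated node
    # pair, exact point pairs only when two leaves are too close to approximate.
    dist = np.linalg.norm(a.center - b.center)
    if dist > 0 and (a.extent + b.extent) / dist < theta:
        return 1  # well separated: a single approximate interaction suffices
    big, small = (a, b) if a.extent >= b.extent else (b, a)
    if not big.children:
        return len(a.pts) * len(b.pts)  # both are leaves: exact pairs
    return sum(dual_traverse(child, small, theta) for child in big.children)

pts = np.random.default_rng(4).random((256, 2))
root = Node(pts)
print(dual_traverse(root, root), "interactions instead of", 256 * 256)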
The contour tree is one of the principal tools in scientific visualisation. It captures the connectivity of level sets in scalar fields. In order to apply the contour tree to exascale data we need efficient shared memory and distributed algorithms. Recent work has revealed a parallel performance bottleneck caused by substructures of contour trees called W-structures. We report two novel algorithms that detect and extract the W-structures. We also use the W-structures to show that extended persistence is not equivalent to branch decomposition and leaf-pruning.
@misc{wrro167116,
title = {W-Structures in Contour Trees},
publisher = {Springer},
pages = {3--18},
journal = {Topological Methods in Data Analysis and Visualization VI},
month = {September},
series = {Mathematics and Visualization (MATHVISUAL)},
year = {2021},
booktitle = {Topological Methods in Data Analysis and Visualization VI},
doi = {10.1007/978-3-030-83500-2\_1},
note = {{\copyright} 2021 The Author(s), under exclusive license to Springer Nature Switzerland AG. This version of the article has been accepted for publication, after peer review (when applicable) and is subject to Springer Nature's AM terms of use (https://www.springernature.com/gp/open-research/policies/accepted-manuscript-terms), but is not the Version of Record and does not reflect post-acceptance improvements, or any corrections. The Version of Record is available online at https://doi.org/10.1007/978-3-030-83500-2\_1.},
issn = {1612-3786},
abstract = {The contour tree is one of the principal tools in scientific visualisation. It captures the connectivity of level sets in scalar fields. In order to apply the contour tree to exascale data we need efficient shared memory and distributed algorithms. Recent work has revealed a parallel performance bottleneck caused by substructures of contour trees called W-structures. We report two novel algorithms that detect and extract the W-structures. We also use the W-structures to show that extended persistence is not equivalent to branch decomposition and leaf-pruning.},
author = {Hristov, P and Carr, HA},
isbn = {978-3-030-83499-9},
url = {https://eprints.whiterose.ac.uk/id/eprint/167116/}
}
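As background for the paper above: the contour tree is assembled from join and split trees, each computable by a union-find sweep over the vertices in sorted order, and W-structures are zigzag paths of saddles in the combined tree. A minimal, hypothetical sketch of such a sweep for a 1D field follows; it illustrates the machinery the paper builds on, not its W-structure detection algorithms.

import numpy as np

def join_tree_1d(f):
    """Union-find sweep from high to low values over a 1D scalar field.
    Records a join-tree edge whenever two components merge; a sketch only."""
    order = np.argsort(-np.asarray(f))   # vertices in descending order
    parent = {}                          # union-find forest
    def find(x):
        while parent[x] != x:
            parent[x] = parent[parent[x]]   # path halving
            x = parent[x]
        return x
    edges, active = [], set()
    for v in order:
        parent[v] = v
        active.add(v)
        for u in (v - 1, v + 1):         # 1D neighbours
            if u in active:
                ru, rv = find(u), find(int(v))
                if ru != rv:
                    parent[ru] = rv      # components join at v
                    edges.append((ru, int(v)))
    return edges

# A zigzag of maxima and saddles of the kind that gives rise to W-structures.
field = [0, 5, 1, 6, 2, 7, 3]
print(join_tree_1d(field))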
To peripheral vision, a pair of physically different images can look the same. Such pairs are metamers relative to each other, just as physically-different spectra of light are perceived as the same color. We propose a real-time method to compute such ventral metamers for foveated rendering where, in particular for near-eye displays, the largest part of the framebuffer maps to the periphery. This improves in quality over state-of-the-art foveation methods which blur the periphery. Work in Vision Science has established how peripheral stimuli are ventral metamers if their statistics are similar. Existing methods, however, require a costly optimization process to find such metamers. To this end, we propose a novel type of statistics particularly well-suited for practical real-time rendering: smooth moments of steerable filter responses. These can be extracted from images in time constant in the number of pixels and in parallel over all pixels using a GPU. Further, we show that they can be compressed effectively and transmitted at low bandwidth. Finally, computing realizations of those statistics can again be performed in constant time and in parallel. This enables a new level of quality for foveated applications such as remote rendering, level-of-detail and Monte-Carlo denoising. In a user study, we finally show how human task performance increases and foveation artifacts are less suspicious when using our method compared to common blurring.
@article{wrro180009,
title = {Beyond blur: real-time ventral metamers for foveated rendering},
publisher = {Association for Computing Machinery (ACM)},
pages = {1--14},
volume = {40},
journal = {ACM Transactions on Graphics},
month = {August},
doi = {10.1145/3450626.3459943},
year = {2021},
number = {4},
note = {{\copyright} 2021 Copyright held by the owner/author(s). This is an author produced version of an article published in ACM Transactions on Graphics. Uploaded in accordance with the publisher's self-archiving policy.},
abstract = {To peripheral vision, a pair of physically different images can look the same. Such pairs are metamers relative to each other, just as physically-different spectra of light are perceived as the same color. We propose a real-time method to compute such ventral metamers for foveated rendering where, in particular for near-eye displays, the largest part of the framebuffer maps to the periphery. This improves in quality over state-of-the-art foveation methods which blur the periphery. Work in Vision Science has established how peripheral stimuli are ventral metamers if their statistics are similar. Existing methods, however, require a costly optimization process to find such metamers. To this end, we propose a novel type of statistics particularly well-suited for practical real-time rendering: smooth moments of steerable filter responses. These can be extracted from images in time constant in the number of pixels and in parallel over all pixels using a GPU. Further, we show that they can be compressed effectively and transmitted at low bandwidth. Finally, computing realizations of those statistics can again be performed in constant time and in parallel. This enables a new level of quality for foveated applications such as remote rendering, level-of-detail and Monte-Carlo denoising. In a user study, we finally show how human task performance increases and foveation artifacts are less suspicious when using our method compared to common blurring.},
issn = {0730-0301},
url = {https://eprints.whiterose.ac.uk/id/eprint/180009/},
author = {Walton, DR and dos Anjos, RK and Friston, S and Swapp, D and Ak{\cs}it, K and Steed, A and Ritschel, T}
}
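The key statistics above are moments of filter responses pooled smoothly over the image. The sketch below conveys the idea with a single gradient filter standing in for a steerable band and Gaussian blurs as the pooling, so both extraction and a realization of the statistics cost constant time per pixel. The filter choice, pooling width and Gaussian realization are illustrative assumptions, not the paper's method.

import numpy as np
from scipy.ndimage import gaussian_filter

def smooth_moments(image, sigma_pool=8.0):
    """Pooled (Gaussian-smoothed) first and second moments of a filter
    response; per-pixel cost is constant thanks to separable blurs."""
    response = np.gradient(image, axis=1)   # stand-in for a steerable band
    m1 = gaussian_filter(response, sigma_pool)            # pooled mean
    m2 = gaussian_filter(response * response, sigma_pool) # pooled 2nd moment
    var = np.maximum(m2 - m1 * m1, 0.0)                   # pooled variance
    return m1, var

def realize(m1, var, rng=np.random.default_rng(0)):
    """Draw a response whose pooled statistics match the input: one way
    to 'realize' the statistics, again in constant time per pixel."""
    return m1 + np.sqrt(var) * rng.standard_normal(m1.shape)

img = np.random.rand(128, 128)
mean, variance = smooth_moments(img)
metamer_response = realize(mean, variance)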
Head-Mounted Virtual Reality (VR) systems provide fully immersive experiences to users and completely isolate them from the outside world, placing them in unsafe situations. Existing research has proposed different alert-based solutions to address this. Our work builds on these studies on notification systems for VR environments from a different perspective. We focus on: (i) exploring alert systems to notify VR users about non-immersed bystanders in socially related, non-critical interaction contexts; (ii) understanding how best to provide awareness of non-immersed bystanders while maintaining presence and immersion within the Virtual Environment (VE). To this end, we developed single and combined alert cues - leveraging proxemics, perception channels, and push/pull approaches - and evaluated those via two user studies. Our findings indicate a strong preference towards maintaining immersion and towards combining audio and visual cues with push and pull notification techniques that evolve dynamically based on proximity.
@misc{wrro180010,
publisher = {IEEE},
title = {Promoting Reality Awareness in Virtual Reality through Proxemics},
month = {May},
journal = {2021 IEEE Virtual Reality and 3D User Interfaces (VR)},
pages = {21--30},
doi = {10.1109/vr50410.2021.00022},
booktitle = {2021 IEEE Virtual Reality and 3D User Interfaces (VR)},
year = {2021},
note = {{\copyright} 2021 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works. Uploaded in accordance with the publisher's self-archiving policy.},
keywords = {Notifications, Virtual Reality, Human Computer Interaction, Context Awareness, Reality Awareness},
issn = {2642-5254},
abstract = {Head-Mounted Virtual Reality (VR) systems provide fully immersive experiences to users and completely isolate them from the outside world, placing them in unsafe situations. Existing research has proposed different alert-based solutions to address this. Our work builds on these studies on notification systems for VR environments from a different perspective. We focus on: (i) exploring alert systems to notify VR users about non-immersed bystanders in socially related, non-critical interaction contexts; (ii) understanding how best to provide awareness of non-immersed bystanders while maintaining presence and immersion within the Virtual Environment (VE). To this end, we developed single and combined alert cues - leveraging proxemics, perception channels, and push/pull approaches - and evaluated those via two user studies. Our findings indicate a strong preference towards maintaining immersion and towards combining audio and visual cues with push and pull notification techniques that evolve dynamically based on proximity.},
author = {Medeiros, D and Anjos, RD and Pantidi, N and Huang, K and Sousa, M and Anslow, C and Jorge, J},
url = {https://eprints.whiterose.ac.uk/id/eprint/180010/}
}
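To make the proxemics-driven cue selection concrete, a toy sketch follows: a bystander's distance is mapped onto interaction zones, and each zone selects an escalating combination of cues. The zone thresholds (loosely following Hall's proxemic distances) and cue names are hypothetical, not the conditions evaluated in the paper's user studies.

# Hypothetical proxemic zones (metres), loosely following Hall's model.
ZONES = [
    (1.2, ("audio", "visual")),   # intimate/personal: strongest, combined cue
    (3.6, ("visual",)),           # social: subtler visual-only cue
    (7.6, ()),                    # public: no interruption
]

def cues_for_distance(distance_m):
    """Return the alert cues to push for a bystander at this range."""
    for limit, cues in ZONES:
        if distance_m <= limit:
            return cues
    return ()

for d in (0.8, 2.5, 10.0):
    print(d, cues_for_distance(d))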
As data sets grow to exascale, automated data analysis and visualisation are increasingly important, to intermediate human understanding and to reduce demands on disk storage via in situ analysis. Trends in architecture of high performance computing systems necessitate analysis algorithms to make effective use of combinations of massively multicore and distributed systems. One of the principal analytic tools is the contour tree, which analyses relationships between contours to identify features of more than local importance. Unfortunately, the predominant algorithms for computing the contour tree are explicitly serial, and founded on serial metaphors, which has limited the scalability of this form of analysis. While there is some work on distributed contour tree computation, and separately on hybrid GPU-CPU computation, there is no efficient algorithm with strong formal guarantees on performance allied with fast practical performance. We report the first shared-memory (SMP) algorithm for fully parallel contour tree computation, with formal guarantees of O(lg n lg t) parallel steps and O(n lg n) work, and implementations with more than 30× parallel speedup on both CPU using TBB and GPU using Thrust, and up to 70× speedup compared to the serial sweep-and-merge algorithm.
@article{wrro151668,
pages = {2437--2454},
volume = {27},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {April},
title = {Scalable Contour Tree Computation by Data Parallel Peak Pruning},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
number = {4},
note = {Protected by copyright. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
doi = {10.1109/TVCG.2019.2948616},
year = {2021},
url = {https://eprints.whiterose.ac.uk/id/eprint/151668/},
author = {Carr, HA and Weber, GH and Sewell, CM and R{\"u}bel, O and Fasel, P and Ahrens, JP},
abstract = {As data sets grow to exascale, automated data analysis and visualisation are increasingly important, to intermediate human understanding and to reduce demands on disk storage via in situ analysis. Trends in architecture of high performance computing systems necessitate analysis algorithms to make effective use of combinations of massively multicore and distributed systems. One of the principal analytic tools is the contour tree, which analyses relationships between contours to identify features of more than local importance. Unfortunately, the predominant algorithms for computing the contour tree are explicitly serial, and founded on serial metaphors, which has limited the scalability of this form of analysis. While there is some work on distributed contour tree computation, and separately on hybrid GPU-CPU computation, there is no efficient algorithm with strong formal guarantees on performance allied with fast practical performance. We report the first shared-memory (SMP) algorithm for fully parallel contour tree computation, with formal guarantees of O(lg n lg t) parallel steps and O(n lg n) work, and implementations with more than 30{$\times$} parallel speedup on both CPU using TBB and GPU using Thrust, and up to 70{$\times$} speedup compared to the serial sweep-and-merge algorithm.},
issn = {1077-2626}
}
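One primitive that underpins polylogarithmic step bounds in data-parallel contour tree algorithms of this kind is pointer doubling: every element simultaneously replaces its successor pointer with its successor's successor, so chains of length n collapse in O(lg n) rounds. A minimal numpy sketch of the primitive, not the published Peak Pruning code:

import numpy as np

def pointer_double(succ):
    """Collapse monotone chains: after O(lg n) vectorised rounds every
    element points directly at the terminal element of its chain."""
    succ = succ.copy()
    while not np.array_equal(succ, succ[succ]):
        succ = succ[succ]   # one data-parallel jump for all elements
    return succ

# A single chain 0 -> 1 -> ... -> 7 that terminates at element 7.
chain = np.array([1, 2, 3, 4, 5, 6, 7, 7])
print(pointer_double(chain))   # [7 7 7 7 7 7 7 7]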
Contour trees are used for topological data analysis in scientific visualization. While originally computed with serial algorithms, recent work has introduced a vector-parallel algorithm. However, this algorithm is relatively slow for fully augmented contour trees which are needed for many practical data analysis tasks. We therefore introduce a representation called the hyperstructure that enables efficient searches through the contour tree and use it to construct a fully augmented contour tree in data parallel, with performance on average 6 times faster than the state-of-the-art parallel algorithm in the TTK topological toolkit.
@article{wrro171318,
title = {Optimization and Augmentation for Data Parallel Contour Trees},
year = {2021},
publisher = {Institute of Electrical and Electronics Engineers},
doi = {10.1109/TVCG.2021.3064385},
note = {Published online
{\copyright} 2021 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {March},
keywords = {Computational Topology, Contour Tree, Parallel Algorithms},
issn = {1077-2626},
abstract = {Contour trees are used for topological data analysis in scientific visualization. While originally computed with serial algorithms, recent work has introduced a vector-parallel algorithm. However, this algorithm is relatively slow for fully augmented contour trees which are needed for many practical data analysis tasks. We therefore introduce a representation called the hyperstructure that enables efficient searches through the contour tree and use it to construct a fully augmented contour tree in data parallel, with performance on average 6 times faster than the state-of-the-art parallel algorithm in the TTK topological toolkit.},
author = {Carr, HA and R{\"u}bel, O and Weber, GH and Ahrens, JP},
url = {https://eprints.whiterose.ac.uk/id/eprint/171318/}
}
Deformation transfer is a type of retargeting method that operates directly on the mesh and, by doing so, enables reuse of animation without setting up character rigs and a mapping between the source and target geometries. Deformation transfer can potentially reduce the costs of animation and give studios a competitive edge when keeping up with the latest computer animation technology. Unfortunately, deformation transfer has limitations and is yet to become standard practice in the industry. This survey starts by introducing Sumner and Popović's [18] seminal work and highlights key issues for industry settings. We then review related work in sections, organized by these key issues. After surveying related work, we discuss how their advances open the door to several practical applications of deformation transfer. To conclude, we highlight areas of future work.
@article{wrro180012,
pages = {52--61},
month = {February},
journal = {Computers \& Graphics},
volume = {94},
title = {Deformation transfer survey},
publisher = {Elsevier},
note = {{\copyright} 2020 Elsevier Ltd. All rights reserved. This is an author produced version of an article published in / accepted for publication in Computers and Graphics. Uploaded in accordance with the publisher's self-archiving policy.},
year = {2021},
doi = {10.1016/j.cag.2020.10.004},
url = {https://eprints.whiterose.ac.uk/id/eprint/180012/},
author = {Roberts, RA and dos Anjos, RK and Maejima, A and Anjyo, K},
abstract = {Deformation transfer is a type of retargeting method that operates directly on the mesh and, by doing so, enables reuse of animation without setting up character rigs and a mapping between the source and target geometries. Deformation transfer can potentially reduce the costs of animation and give studios a competitive edge when keeping up with the latest computer animation technology. Unfortunately, deformation transfer has limitations and is yet to become standard practice in the industry. This survey starts by introducing Sumner and Popovi{\'c}'s [18] seminal work and highlights key issues for industry settings. We then review related work in sections, organized by these key issues. After surveying related work, we discuss how their advances open the door to several practical applications of deformation transfer. To conclude, we highlight areas of future work.},
keywords = {Deformation transfer; Retargeting; Animation; Industry applications},
issn = {0097-8493}
}
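At the heart of Sumner and Popović's formulation is the per-triangle deformation gradient: the affine map taking a source triangle's rest pose to its deformed pose, built from two edge vectors plus a fourth vertex offset along the scaled normal, and later imposed on the target mesh through a least-squares solve. A sketch of just the gradient step, with hypothetical helper names:

import numpy as np

def frame(v0, v1, v2):
    """3x3 frame spanned by two edges and a scaled normal; the implicit
    fourth vertex is v0 + n / sqrt(|n|), which makes the map well defined."""
    e1, e2 = v1 - v0, v2 - v0
    n = np.cross(e1, e2)
    n = n / np.sqrt(np.linalg.norm(n))
    return np.column_stack([e1, e2, n])

def deformation_gradient(rest, deformed):
    """Affine map Q with Q @ frame(rest) = frame(deformed)."""
    return frame(*deformed) @ np.linalg.inv(frame(*rest))

rest = [np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 1., 0.])]
bent = [np.array([0., 0., 0.]), np.array([1., 0., 0.]), np.array([0., 0.8, 0.3])]
print(deformation_gradient(rest, bent))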
Adapting dashboard design to different contexts of use is an open question in visualisation research. Dashboard designers often seek to strike a balance between dashboard adaptability and ease-of-use, and in hospitals challenges arise from the vast diversity of key metrics, data models and users involved at different organizational levels. In this design study, we present QualDash, a dashboard generation engine that allows for the dynamic configuration and deployment of visualisation dashboards for healthcare quality improvement (QI). We present a rigorous task analysis based on interviews with healthcare professionals, a co-design workshop and a series of one-on-one meetings with front line analysts. From these activities we define a metric card metaphor as a unit of visual analysis in healthcare QI, using this concept as a building block for generating highly adaptable dashboards, and leading to the design of a Metric Specification Structure (MSS). Each MSS is a JSON structure which enables dashboard authors to concisely configure unit-specific variants of a metric card, while offloading common patterns that are shared across cards to be preset by the engine. We reflect on deploying and iterating the design of QualDash in cardiology wards and pediatric intensive care units of five NHS hospitals. Finally, we report evaluation results that demonstrate the adaptability, ease-of-use and usefulness of QualDash in a real-world scenario.
@article{wrro165165,
year = {2021},
doi = {10.1109/TVCG.2020.3030424},
note = {{\copyright} 2020, IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
number = {2},
publisher = {IEEE},
title = {QualDash: Adaptable Generation of Visualisation Dashboards for Healthcare Quality Improvement},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {February},
volume = {27},
pages = {689--699},
issn = {1077-2626},
keywords = {Information visualisation, task analysis, co-design, dashboards, design study, healthcare},
abstract = {Adapting dashboard design to different contexts of use is an open question in visualisation research. Dashboard designers often seek to strike a balance between dashboard adaptability and ease-of-use, and in hospitals challenges arise from the vast diversity of key metrics, data models and users involved at different organizational levels. In this design study, we present QualDash, a dashboard generation engine that allows for the dynamic configuration and deployment of visualisation dashboards for healthcare quality improvement (QI). We present a rigorous task analysis based on interviews with healthcare professionals, a co-design workshop and a series of one-on-one meetings with front line analysts. From these activities we define a metric card metaphor as a unit of visual analysis in healthcare QI, using this concept as a building block for generating highly adaptable dashboards, and leading to the design of a Metric Specification Structure (MSS). Each MSS is a JSON structure which enables dashboard authors to concisely configure unit-specific variants of a metric card, while offloading common patterns that are shared across cards to be preset by the engine. We reflect on deploying and iterating the design of QualDash in cardiology wards and pediatric intensive care units of five NHS hospitals. Finally, we report evaluation results that demonstrate the adaptability, ease-of-use and usefulness of QualDash in a real-world scenario.},
author = {Elshehaly, M and Randell, R and Brehmer, M and McVey, L and Alvarado, N and Gale, CP and Ruddle, RA},
url = {https://eprints.whiterose.ac.uk/id/eprint/165165/}
}
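To make the metric card idea concrete, here is a hypothetical Metric Specification Structure written as a Python dict: the card declares its metric, data bindings and views, while anything omitted falls back to engine-wide presets. The field names are illustrative assumptions, not QualDash's actual schema.

import json

# Hypothetical MSS: field names are illustrative, not QualDash's schema.
mss = {
    "card": "90-minute door-to-balloon rate",
    "audit": "MINAP",
    "unit": "cardiology",   # unit-specific variant of the card
    "numerator": {"field": "reperfusion_within_90min", "equals": True},
    "denominator": {"field": "admission_type", "equals": "STEMI"},
    "views": [
        {"type": "line", "x": "month", "y": "rate"},
        {"type": "bar", "x": "ward", "y": "rate"},
    ],
    # Anything omitted (colour scales, tooltips, ...) would fall back
    # to engine-wide presets shared across all metric cards.
}
print(json.dumps(mss, indent=2))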
Event sequences are central to the analysis of data in domains that range from biology and health, to logfile analysis and people's everyday behavior. Many visualization tools have been created for such data, but people are error-prone when asked to judge the similarity of event sequences with basic presentation methods. This paper describes an experiment that investigates whether local and global alignment techniques improve people's performance when judging sequence similarity. Participants were divided into three groups (basic vs. local vs. global alignment), and each participant judged the similarity of 180 sets of pseudo-randomly generated sequences. Each set comprised a target, a correct choice and a wrong choice. After training, the global alignment group was more accurate than the local alignment group (98\% vs. 93\% correct), with the basic group getting 95\% correct. Participants' response times were primarily affected by the number of event types, the similarity of sequences (measured by the Levenshtein distance) and the edit types (nine combinations of deletion, insertion and substitution). In summary, global alignment is superior and people's performance could be further improved by choosing alignment parameters that explicitly penalize sequence mismatches.
@article{wrro172191,
note = {Published online
{\copyright} 2020 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {January},
title = {The Effect of Alignment on People's Ability to Judge Event Sequence Similarity},
year = {2021},
publisher = {IEEE},
doi = {10.1109/TVCG.2021.3050497},
issn = {1077-2626},
keywords = {Event sequence visualization; sequence alignment; evaluation; user study},
abstract = {Event sequences are central to the analysis of data in domains that range from biology and health, to logfile analysis and people's everyday behavior. Many visualization tools have been created for such data, but people are error-prone when asked to judge the similarity of event sequences with basic presentation methods. This paper describes an experiment that investigates whether local and global alignment techniques improve people's performance when judging sequence similarity. Participants were divided into three groups (basic vs. local vs. global alignment), and each participant judged the similarity of 180 sets of pseudo-randomly generated sequences. Each set comprised a target, a correct choice and a wrong choice. After training, the global alignment group was more accurate than the local alignment group (98\% vs. 93\% correct), with the basic group getting 95\% correct. Participants' response times were primarily affected by the number of event types, the similarity of sequences (measured by the Levenshtein distance) and the edit types (nine combinations of deletion, insertion and substitution). In summary, global alignment is superior and people's performance could be further improved by choosing alignment parameters that explicitly penalize sequence mismatches.},
author = {Ruddle, RA and Bernard, J and L{\"u}cke-Tieke, H and May, T and Kohlhammer, J},
url = {https://eprints.whiterose.ac.uk/id/eprint/172191/}
}
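The global alignment condition in the study corresponds to Needleman-Wunsch dynamic programming, in which gap and mismatch penalties can be tuned to explicitly penalize sequence mismatches, as the conclusion recommends. A standard sketch with hypothetical scoring values:

def global_align(a, b, match=1, mismatch=-2, gap=-1):
    """Needleman-Wunsch global alignment: O(len(a)*len(b)) DP table,
    returning the best score. Raising the mismatch/gap penalties makes
    differences between sequences more visible in the alignment."""
    n, m = len(a), len(b)
    dp = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        dp[i][0] = i * gap
    for j in range(1, m + 1):
        dp[0][j] = j * gap
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            sub = match if a[i - 1] == b[j - 1] else mismatch
            dp[i][j] = max(dp[i - 1][j - 1] + sub,   # (mis)match
                           dp[i - 1][j] + gap,       # deletion
                           dp[i][j - 1] + gap)       # insertion
    return dp[n][m]

print(global_align("ABCADE", "ABCDE"))   # one deletion away: score 4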
Voice interaction with natural language understanding (NLU) has been extensively explored in desktop computers, handheld devices, and human-robot interaction. However, there is limited research into voice interaction with NLU in augmented reality (AR). There are benefits of using voice interaction in AR, such as high naturalness and being hands-free. In this project, we introduce VOARLA, an NLU-powered AR voice interface that navigates a courier driver delivering a package. A user study was completed to evaluate VOARLA against an AR voice interface without NLU to investigate the effectiveness of NLU in the navigation interface in AR. We evaluated three aspects: accuracy, productivity, and the learning curve of the commands. Results found that using NLU in AR increases the accuracy of the interface by 15\%. However, higher accuracy did not correlate to an increase in productivity. Results suggest that NLU helped users remember the commands on the first run when they were unfamiliar with the system. This suggests that using NLU in an AR hands-free application can make the learning curve easier for new users.
@misc{wrro194573,
doi = {10.1109/ivcnz51579.2020.9290643},
publisher = {IEEE},
booktitle = {35th International Conference on Image and Vision Computing New Zealand},
year = {2020},
title = {Voice Interaction for Augmented Reality Navigation Interfaces with Natural Language Understanding},
month = {December},
journal = {35th International Conference on Image and Vision Computing New Zealand (IVCNZ)},
abstract = {Voice interaction with natural language understanding (NLU) has been extensively explored in desktop computers, handheld devices, and human-robot interaction. However, there is limited research into voice interaction with NLU in augmented reality (AR). There are benefits of using voice interaction in AR, such as high naturalness and being hands-free. In this project, we introduce VOARLA, an NLU-powered AR voice interface that navigates a courier driver delivering a package. A user study was completed to evaluate VOARLA against an AR voice interface without NLU to investigate the effectiveness of NLU in the navigation interface in AR. We evaluated three aspects: accuracy, productivity, and the learning curve of the commands. Results found that using NLU in AR increases the accuracy of the interface by 15\%. However, higher accuracy did not correlate to an increase in productivity. Results suggest that NLU helped users remember the commands on the first run when they were unfamiliar with the system. This suggests that using NLU in an AR hands-free application can make the learning curve easier for new users.},
keywords = {Augmented Reality; speech recognition; natural language understanding (NLU); speech interaction; artificial intelligence; intelligent interface},
isbn = {978-1-7281-8580-4},
author = {Zhao, J and Parry, CJ and dos Anjos, R and Anslow, C and Rhee, T},
issn = {2151-2191},
url = {https://eprints.whiterose.ac.uk/id/eprint/194573/}
}
The contour tree is a tool for understanding the topological structure of a scalar field. Recent work has built efficient contour tree algorithms for shared memory parallel computation, driven by the need to analyze large data sets in situ while the simulation is running. Unfortunately, methods for using the contour tree for practical data analysis are still primarily serial, including single isocontour extraction, branch decomposition and simplification. We report data parallel methods for these tasks using a data structure called the hyperstructure and a general purpose approach called a hypersweep. We implement and integrate these methods with a Cinema database that stores features as depth images and with a web server that reconstructs the features for direct visualization.
@misc{wrro167115,
pages = {12--21},
month = {December},
journal = {Proceedings of 2020 IEEE 10th Symposium on Large Data Analysis and Visualization (LDAV)},
title = {Data Parallel Hypersweeps for in Situ Topological Analysis},
publisher = {IEEE},
note = {{\copyright} 2020, IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
doi = {10.1109/LDAV51489.2020.00008},
booktitle = {2020 IEEE 10th Symposium on Large Data Analysis and Visualization (LDAV)},
year = {2020},
isbn = {978-1-7281-8468-5},
author = {Hristov, P and Weber, G and Carr, H and R{\"u}bel, O and Ahrens, JP},
abstract = {The contour tree is a tool for understanding the topological structure of a scalar field. Recent work has built efficient contour tree algorithms for shared memory parallel computation, driven by the need to analyze large data sets in situ while the simulation is running. Unfortunately, methods for using the contour tree for practical data analysis are still primarily serial, including single isocontour extraction, branch decomposition and simplification. We report data parallel methods for these tasks using a data structure called the hyperstructure and a general purpose approach called a hypersweep. We implement and integrate these methods with a Cinema database that stores features as depth images and with a web server that reconstructs the features for direct visualization.},
keywords = {contour tree, in situ, scalar field, geometric measures, branch decomposition},
url = {https://eprints.whiterose.ac.uk/id/eprint/167115/},
issn = {2373-7514}
}
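A hypersweep accumulates a geometric measure over every subtree of the contour tree with prefix sums instead of a serial traversal. The classic Euler-tour formulation below conveys that prefix-sum flavour: each subtree occupies a contiguous slice of the tour, so one cumulative sum answers all subtree queries. This is an illustrative sketch, not the paper's hyperstructure-based implementation.

import numpy as np

def subtree_sums(children, values, root=0):
    """Subtree sums via an Euler tour: a vertex's subtree occupies a
    contiguous slice of the tour, so one prefix sum answers every query."""
    n = len(values)
    tin, tout, tour = [0] * n, [0] * n, []
    stack = [(root, False)]
    while stack:                    # iterative DFS to build the tour
        v, done = stack.pop()
        if done:
            tout[v] = len(tour)
            continue
        tin[v] = len(tour)
        tour.append(v)
        stack.append((v, True))
        for c in children[v]:
            stack.append((c, False))
    prefix = np.concatenate([[0], np.cumsum([values[v] for v in tour])])
    # Sum over the subtree of v = prefix[tout[v]] - prefix[tin[v]].
    return [prefix[tout[v]] - prefix[tin[v]] for v in range(n)]

#        0
#       / \
#      1   2
#     /
#    3
children = [[1, 2], [3], [], []]
print(subtree_sums(children, values=[1, 1, 1, 1]))   # [4, 2, 1, 1]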
After two decades of work in computational topology, it is clearly a computationally challenging area. Not only do we have the usual algorithmic and programming difficulties with establishing correctness, we also have a class of problems that are mathematically complex and notationally fragile. Effective development and deployment therefore requires an additional step - construction or selection of suitable test cases. Since we cannot test all possible inputs, our selection of test cases expresses our understanding of the task and of the problems involved. Moreover, the scale of the data sets we work with is such that, no matter how unlikely the behaviour mathematically, it is nearly guaranteed to occur at scale in every run. The test cases we choose are therefore tightly coupled with mathematically pathological cases, and need to be developed using the skills expressed most obviously in constructing mathematical counterexamples. This paper is therefore a first attempt at reporting, classifying and analysing test cases previously used in computational topology, and the expression of a philosophy of how to test topological code.
@incollection{wrro144396,
pages = {103--120},
month = {December},
title = {Pathological and Test Cases For Reeb Analysis},
publisher = {Springer},
note = {{\copyright} Springer Nature Switzerland AG 2020. This is an author accepted version of a chapter published in Carr H., Fujishiro I., Sadlo F., Takahashi S. (eds) Topological Methods in Data Analysis and Visualization V. TopoInVis 2017. Mathematics and Visualization. Springer, Cham. Uploaded in accordance with the publisher's self-archiving policy.},
series = {Mathematics and Visualization book series},
booktitle = {Topological Methods in Data Analysis and Visualization V},
doi = {10.1007/978-3-030-43036-8\_7},
year = {2020},
isbn = {978-3-030-43035-1},
url = {https://eprints.whiterose.ac.uk/id/eprint/144396/},
author = {Carr, H and Tierny, J and Weber, GH},
abstract = {After two decades of work in computational topology, it is clearly a computationally challenging area. Not only do we have the usual algorithmic and programming difficulties with establishing correctness, we also have a class of problems that are mathematically complex and notationally fragile. Effective development and deployment therefore requires an additional step - construction or selection of suitable test cases. Since we cannot test all possible inputs, our selection of test cases expresses our understanding of the task and of the problems involved. Moreover, the scale of the data sets we work with is such that, no matter how unlikely the behaviour mathematically, it is nearly guaranteed to occur at scale in every run. The test cases we choose are therefore tightly coupled with mathematically pathological cases, and need to be developed using the skills expressed most obviously in constructing mathematical counterexamples. This paper is therefore a first attempt at reporting, classifying and analysing test cases previously used in computational topology, and the expression of a philosophy of how to test topological code.},
keywords = {Computational Topology, Reeb Space, Reeb Graph, Contour Tree, Reeb Analysis}
}
The fiber surface generalizes the popular isosurface to multi-fields, so that pre-images can be visualized as surfaces. As with the isosurface, however, the fiber surface suffers from visual occlusion. We propose to avoid such occlusion by restricting the components to only the relevant ones with a new component-wise flexing algorithm. The approach, flexible fiber surface, generalizes the manipulation idea found in the flexible isosurface for the fiber surface. The flexible isosurface in the original form, however, relies on the contour tree. For the fiber surface, this corresponds to the Reeb space, which is challenging for both the computation and user interaction. We thus take a Reeb-free approach, in which one does not compute the Reeb space. Under this constraint, we generalize a few selected interactions in the flexible isosurface and discuss the implication of the restriction.
@incollection{wrro144583,
series = {Mathematics and Visualization book series},
title = {Flexible Fiber Surfaces: A Reeb-Free Approach},
publisher = {Springer International Publishing},
doi = {10.1007/978-3-030-43036-8\_12},
booktitle = {Topological Methods in Data Analysis and Visualization V},
year = {2020},
note = {{\copyright} Springer Nature Switzerland AG 2020. This is an author accepted version of a paper published in Sakurai D., Ono K., Carr H., Nonaka J., Kawanabe T. (2020) Flexible Fiber Surfaces: A Reeb-Free Approach. In: Carr H., Fujishiro I., Sadlo F., Takahashi S. (eds) Topological Methods in Data Analysis and Visualization V. TopoInVis 2017. Mathematics and Visualization. Springer, Cham. Uploaded in accordance with the publisher's self-archiving policy.},
month = {December},
abstract = {The fiber surface generalizes the popular isosurface to multi-fields, so that pre-images can be visualized as surfaces. As with the isosurface, however, the fiber surface suffers from visual occlusion. We propose to avoid such occlusion by restricting the components to only the relevant ones with a new component-wise flexing algorithm. The approach, flexible fiber surface, generalizes the manipulation idea found in the flexible isosurface for the fiber surface. The flexible isosurface in the original form, however, relies on the contour tree. For the fiber surface, this corresponds to the Reeb space, which is challenging for both the computation and user interaction. We thus take a Reeb-free approach, in which one does not compute the Reeb space. Under this constraint, we generalize a few selected interactions in the flexible isosurface and discuss the implication of the restriction.},
isbn = {978-3-030-43035-1},
url = {https://eprints.whiterose.ac.uk/id/eprint/144583/},
author = {Sakurai, D and Ono, K and Carr, H and Nonaka, J and Kawanabe, T}
}
As Exascale computing proliferates, we see an accelerating shift towards clusters with thousands of nodes and thousands of cores per node, often on the back of commodity graphics processing units. This paper argues that this drives a once-in-a-generation shift of computation, and that fundamentals of computer science therefore need to be re-examined. Exploiting the full power of Exascale computation will require attention to the fundamentals of programme design and specification, programming language design, systems and software engineering, analytic, performance and cost models, fundamental algorithmic design, and to increasing replacement of human bandwidth by computational analysis. As part of this, we will argue that Exascale computing will require a significant degree of co-design and close attention to the economics underlying the challenges ahead.
@misc{wrro164225,
note = {{\copyright} Springer Nature Switzerland AG 2020. This is an author produced version of a conference paper published in Lecture Notes in Computer Science. Uploaded in accordance with the publisher's self-archiving policy.
This version of the article has been accepted for publication, after peer review (when applicable) and is subject to Springer Nature's AM terms of use (https://www.springernature.com/gp/open-research/policies/accepted-manuscript-terms), but is not the Version of Record and does not reflect post-acceptance improvements, or any corrections. The Version of Record is available online at: https://doi.org/10.1007/978-3-030-63058-4\_19 .},
year = {2020},
editor = {K Djemame and J Altmann and J{\'A} Ba{\~n}ares and O Agmon Ben-Yehuda and V Stankovski and B Tuffin},
doi = {10.1007/978-3-030-63058-4\_19},
booktitle = {GECON2020: 17th International Conference on the Economics of Grids, Clouds, Systems, and Services},
pages = {211--216},
journal = {Lecture Notes in Computer Science},
month = {December},
volume = {12441},
title = {Exascale Computing Deployment Challenges},
address = {Cham, Switzerland},
publisher = {Springer},
issn = {0302-9743},
url = {http://gecon2020.gecon.info/},
abstract = {As Exascale computing proliferates, we see an accelerating shift towards clusters with thousands of nodes and thousands of cores per node, often on the back of commodity graphics processing units. This paper argues that this drives a once-in-a-generation shift of computation, and that fundamentals of computer science therefore need to be re-examined. Exploiting the full power of Exascale computation will require attention to the fundamentals of programme design and specification, programming language design, systems and software engineering, analytic, performance and cost models, fundamental algorithmic design, and to increasing replacement of human bandwidth by computational analysis. As part of this, we will argue that Exascale computing will require a significant degree of co-design and close attention to the economics underlying the challenges ahead.},
keywords = {Exascale computing; High performance computing; Holistic approach; Economics},
isbn = {9783030630577},
author = {Djemame, K and Carr, H}
}
Eye-tracking with gaze estimation is a key element in many applications, ranging from foveated rendering and user interaction to behavioural analysis and usage metrics. For virtual reality, eye-tracking typically relies on near-eye cameras that are mounted in the VR headset. Such methods usually involve an initial calibration to create a mapping from eye features to a gaze position. However, the accuracy based on the initial calibration degrades when the position of the headset relative to the users' head changes; this is especially noticeable when users readjust the headset for comfort or even completely remove it for a short while. We show that a correction of such shifts can be achieved via 2D drift vectors in eye space. Our method estimates these drifts by extracting salient cues from the shown virtual environment to determine potential gaze directions. Our solution can compensate for HMD shifts, even those arising from taking off the headset, which enables us to eliminate reinitialization steps.
@article{wrro175920,
doi = {10.1016/j.cag.2020.06.007},
year = {2020},
pages = {83--94},
author = {P Shi and M Billeter and E Eisemann},
volume = {91},
journal = {Computers \& Graphics},
month = {October},
title = {SalientGaze: Saliency-based gaze correction in virtual reality},
publisher = {Elsevier},
url = {https://eprints.whiterose.ac.uk/id/eprint/175920/},
keywords = {Virtual reality; Eye-tracking; Headsets shifts; Saliency; Stereo; Drift estimation},
abstract = {Eye-tracking with gaze estimation is a key element in many applications, ranging from foveated rendering and user interaction to behavioural analysis and usage metrics. For virtual reality, eye-tracking typically relies on near-eye cameras that are mounted in the VR headset. Such methods usually involve an initial calibration to create a mapping from eye features to a gaze position. However, the accuracy based on the initial calibration degrades when the position of the headset relative to the users' head changes; this is especially noticeable when users readjust the headset for comfort or even completely remove it for a short while. We show that a correction of such shifts can be achieved via 2D drift vectors in eye space. Our method estimates these drifts by extracting salient cues from the shown virtual environment to determine potential gaze directions. Our solution can compensate for HMD shifts, even those arising from taking off the headset, which enables us to eliminate reinitialization steps.}
}
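The correction described above can be reduced, for illustration, to maintaining a single 2D drift vector in eye space: whenever the raw gaze lands near a salient point, the residual updates a running drift estimate that is added to later samples. The exponential smoothing and acceptance threshold below are assumptions for a toy sketch, not the paper's estimator.

import numpy as np

class DriftCorrector:
    """Keep a 2D drift vector in eye space and refine it from salient cues."""
    def __init__(self, smoothing=0.05, max_residual=0.08):
        self.drift = np.zeros(2)
        self.smoothing = smoothing          # exponential-smoothing factor
        self.max_residual = max_residual    # ignore implausible matches

    def observe(self, raw_gaze, salient_points):
        """Attribute the offset to headset shift if a salient point is close."""
        corrected = raw_gaze + self.drift
        residuals = [p - corrected for p in salient_points]
        best = min(residuals, key=np.linalg.norm, default=None)
        if best is not None and np.linalg.norm(best) < self.max_residual:
            self.drift += self.smoothing * best
        return raw_gaze + self.drift

corr = DriftCorrector()
for _ in range(200):   # user fixates a salient point; raw gaze is shifted
    gaze = corr.observe(np.array([0.02, -0.03]), [np.array([0.05, 0.0])])
print(gaze)            # drift estimate pulls gaze towards the salient cue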
In this paper, we address the 3D object detection task by capturing multi-level contextual information with the self-attention mechanism and multi-scale feature fusion. Most existing 3D object detection methods recognize objects individually, without giving any consideration to contextual information between these objects. Comparatively, we propose Multi-Level Context VoteNet (MLCVNet) to recognize 3D objects correlatively, building on the state-of-the-art VoteNet. We introduce three context modules into the voting and classifying stages of VoteNet to encode contextual information at different levels. Specifically, a Patch-to-Patch Context (PPC) module is employed to capture contextual information between the point patches, before voting for their corresponding object centroid points. Subsequently, an Object-to-Object Context (OOC) module is incorporated before the proposal and classification stage, to capture the contextual information between object candidates. Finally, a Global Scene Context (GSC) module is designed to learn the global scene context. We demonstrate these by capturing contextual information at patch, object and scene levels. Our method is an effective way to promote detection accuracy, achieving new state-of-the-art detection performance on challenging 3D object detection datasets, i.e., SUN RGBD and ScanNet. We also release our code at https://github.com/NUAAXQ/MLCVNet.
@misc{wrro233704,
note = {{\copyright} 2020 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
year = {2020},
booktitle = {2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
doi = {10.1109/cvpr42600.2020.01046},
pages = {10444--10453},
journal = {2020 IEEE/CVF Conference on Computer Vision and Pattern Recognition (CVPR)},
month = {August},
title = {MLCVNet: Multi-Level Context VoteNet for 3D Object Detection},
publisher = {IEEE},
issn = {1063-6919},
url = {https://ieeexplore.ieee.org/document/9156370},
keywords = {Three-dimensional displays, Object detection, Two dimensional displays, Feature extraction, Task analysis, Proposals, Machine learning},
abstract = {In this paper, we address the 3D object detection task by capturing multi-level contextual information with the self-attention mechanism and multi-scale feature fusion. Most existing 3D object detection methods recognize objects individually, without giving any consideration to contextual information between these objects. Comparatively, we propose Multi-Level Context VoteNet (MLCVNet) to recognize 3D objects correlatively, building on the state-of-the-art VoteNet. We introduce three context modules into the voting and classifying stages of VoteNet to encode contextual information at different levels. Specifically, a Patch-to-Patch Context (PPC) module is employed to capture contextual information between the point patches, before voting for their corresponding object centroid points. Subsequently, an Object-to-Object Context (OOC) module is incorporated before the proposal and classification stage, to capture the contextual information between object candidates. Finally, a Global Scene Context (GSC) module is designed to learn the global scene context. We demonstrate these by capturing contextual information at patch, object and scene levels. Our method is an effective way to promote detection accuracy, achieving new state-of-the-art detection performance on challenging 3D object detection datasets, i.e., SUN RGBD and ScanNet. We also release our code at https://github.com/NUAAXQ/MLCVNet.},
author = {Xie, Q. and Lai, Y.-K. and Wu, J. and Wang, Z. and Zhang, Y. and Xu, K. and Wang, J.},
isbn = {978-1-7281-7169-2}
}
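All three context modules above rely on the same self-attention primitive: a set of feature vectors attends to itself so that each patch, object candidate or scene element is re-encoded with information from the others. A minimal scaled dot-product self-attention in numpy follows; the weights, sizes and wiring are hypothetical stand-ins, not MLCVNet's modules.

import numpy as np

def self_attention(features, rng=np.random.default_rng(0)):
    """Scaled dot-product self-attention over a set of feature vectors.
    Each row of the output mixes contextual information from all rows."""
    n, d = features.shape
    Wq, Wk, Wv = (rng.standard_normal((d, d)) / np.sqrt(d) for _ in range(3))
    q, k, v = features @ Wq, features @ Wk, features @ Wv
    scores = q @ k.T / np.sqrt(d)                    # pairwise affinities
    weights = np.exp(scores - scores.max(axis=1, keepdims=True))
    weights /= weights.sum(axis=1, keepdims=True)    # softmax per row
    return weights @ v                               # context-enhanced features

# E.g. 128 object candidates with 64-d features, as in an OOC-style module.
ctx = self_attention(np.random.rand(128, 64))
print(ctx.shape)   # (128, 64)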
Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360° video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. Possible applications of AVT are identified and proposed to guide future usage.
@article{wrro194575,
pages = {1923--1933},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {May},
volume = {26},
title = {Augmented Virtual Teleportation for High-Fidelity Telecollaboration},
publisher = {IEEE},
note = {{\copyright} 2020 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.
This is an author produced version of an article published in IEEE TRANSACTIONS ON VISUALIZATION AND COMPUTER GRAPHICS. Uploaded in accordance with the publisher's self-archiving policy.},
number = {5},
year = {2020},
doi = {10.1109/tvcg.2020.2973065},
url = {https://eprints.whiterose.ac.uk/id/eprint/194575/},
author = {Rhee, T and Thompson, S and Medeiros, D and dos Anjos, R and Chalmers, A},
abstract = {Telecollaboration involves the teleportation of a remote collaborator to another real-world environment where their partner is located. The fidelity of the environment plays an important role for allowing corresponding spatial references in remote collaboration. We present a novel asymmetric platform, Augmented Virtual Teleportation (AVT), which provides high-fidelity telepresence of a remote VR user (VR-Traveler) into a real-world collaboration space to interact with a local AR user (AR-Host). AVT uses a 360{\textdegree} video camera (360-camera) that captures and live-streams the omni-directional scenes over a network. The remote VR-Traveler watching the video in a VR headset experiences live presence and co-presence in the real-world collaboration space. The VR-Traveler's movements are captured and transmitted to a 3D avatar overlaid onto the 360-camera which can be seen in the AR-Host's display. The visual and audio cues for each collaborator are synchronized in the Mixed Reality Collaboration space (MRC-space), where they can interactively edit virtual objects and collaborate in the real environment using the real objects as a reference. High fidelity, real-time rendering of virtual objects and seamless blending into the real scene allows for unique mixed reality use-case scenarios. Our working prototype has been tested with a user study to evaluate spatial presence, co-presence, and user satisfaction during telecollaboration. Possible applications of AVT are identified and proposed to guide future usage.},
keywords = {Telepresence, Collaboration, Real-time, Mixed Reality, 360{\textdegree} Panoramic Video},
issn = {1077-2626}
}
Voxels are a popular choice to encode complex geometry. Their regularity makes updates easy and enables random retrieval of values. The main limitation lies in the poor scaling with respect to resolution. Sparse voxel DAGs (Directed Acyclic Graphs) overcome this hurdle and offer high-resolution representations for real-time rendering but only handle static data. We introduce a novel data structure to enable interactive modifications of such compressed voxel geometry without requiring de- and recompression. Besides binary data to encode geometry, it also supports compressed attributes (e.g., color). We illustrate the usefulness of our representation via an interactive large-scale voxel editor (supporting carving, filling, copying, and painting).
@article{wrro179723,
number = {2},
year = {2020},
doi = {10.1111/cgf.13916},
author = {V Careil and M Billeter and E Eisemann},
pages = {111--119},
journal = {Computer Graphics Forum},
month = {May},
volume = {39},
title = {Interactively Modifying Compressed Sparse Voxel Representations},
publisher = {Wiley},
url = {https://eprints.whiterose.ac.uk/id/eprint/179723/},
abstract = {Voxels are a popular choice to encode complex geometry. Their regularity makes updates easy and enables random retrieval of values. The main limitation lies in the poor scaling with respect to resolution. Sparse voxel DAGs (Directed Acyclic Graphs) overcome this hurdle and offer high-resolution representations for real-time rendering but only handle static data. We introduce a novel data structure to enable interactive modifications of such compressed voxel geometry without requiring de- and recompression. Besides binary data to encode geometry, it also supports compressed attributes (e.g., color). We illustrate the usefulness of our representation via an interactive large-scale voxel editor (supporting carving, filling, copying, and painting).},
keywords = {CCS Concepts: Computing methodologies -{\ensuremath{>}} Volumetric models}
}
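The compression that sparse voxel DAGs exploit is structural deduplication: identical subtrees of a sparse voxel octree are stored only once and shared, and the paper's contribution is editing such a structure without de- and recompression. A minimal bottom-up deduplication over a toy tuple-based octree follows; the representation is a hypothetical illustration, not the paper's data structure.

def to_dag(node, pool=None):
    """Deduplicate an octree bottom-up. A node is a tuple of 8 children,
    each a child node or None; leaves are True (solid) or None (empty).
    Identical subtrees collapse onto one shared instance."""
    if pool is None:
        pool = {}
    if node is True or node is None:
        return node, pool
    dedup_children = tuple(to_dag(c, pool)[0] for c in node)
    # Shared children are unique objects, so their ids form a valid key.
    key = tuple(id(c) if isinstance(c, tuple) else c for c in dedup_children)
    if key not in pool:
        pool[key] = dedup_children
    return pool[key], pool

# Two structurally identical solid corners: after conversion both octants
# of the root point at the same shared node.
corner = (True, None, None, None, None, None, None, None)
root = (corner, (True, None, None, None, None, None, None, None),
        None, None, None, None, None, None)
dag, pool = to_dag(root)
print(dag[0] is dag[1])   # True: the subtree is stored once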
Healthcare organizations worldwide use quality dashboards to provide feedback to clinical teams and managers, in order to monitor care quality and stimulate quality improvement. However, there is limited evidence regarding the impact of quality dashboards and audit and feedback research focuses on feedback to individual clinicians, rather than to clinical and managerial teams. Consequently, we know little about what features a quality dashboard needs in order to provide benefit. We conducted 54 interviews across five healthcare organizations in the National Health Service in England, interviewing personnel at different levels of the organization, to understand how national (UK) clinical audit data are used for quality improvement and factors that support or constrain use of these data. The findings, organized around the themes of choosing performance indicators, assessing performance, identifying causes, communicating from ward to board, and data quality, have implications for the design of quality dashboards, which we have translated into a series of requirements.
@misc{wrro156817,
pages = {735--744},
month = {March},
journal = {AMIA Annual Symposium Proceedings},
volume = {2019},
title = {Requirements for a quality dashboard: Lessons from National Clinical Audits},
publisher = {American Medical Informatics Association},
note = {This is an author produced version of a conference paper published in AMIA Annual Symposium Proceedings. Uploaded with permission from the publisher.},
year = {2020},
booktitle = {AMIA 2019 Annual Symposium},
author = {Randell, R and Alvarado, N and McVey, L and Ruddle, RA and Doherty, P and Gale, C and Mamas, M and Dowding, D},
url = {https://knowledge.amia.org/69862-amia-1.4570936/t004-1.4574923?qr=1},
issn = {1942-597X},
abstract = {Healthcare organizations worldwide use quality dashboards to provide feedback to clinical teams and managers, in order to monitor care quality and stimulate quality improvement. However, there is limited evidence regarding the impact of quality dashboards and audit and feedback research focuses on feedback to individual clinicians, rather than to clinical and managerial teams. Consequently, we know little about what features a quality dashboard needs in order to provide benefit. We conducted 54 interviews across five healthcare organizations in the National Health Service in England, interviewing personnel at different levels of the organization, to understand how national (UK) clinical audit data are used for quality improvement and factors that support or constrain use of these data. The findings, organized around the themes of choosing performance indicators, assessing performance, identifying causes, communicating from ward to board, and data quality, have implications for the design of quality dashboards, which we have translated into a series of requirements.}
}
Introduction: National audits are used to monitor care quality and safety and are anticipated to reduce unexplained variations in quality by stimulating quality improvement (QI). However, variation within and between providers in the extent of engagement with national audits means that the potential for national audit data to inform QI is not being realised. This study will undertake a feasibility evaluation of QualDash, a quality dashboard designed to support clinical teams and managers to explore data from two national audits, the Myocardial Ischaemia National Audit Project (MINAP) and the Paediatric Intensive Care Audit Network (PICANet). Methods and analysis: Realist evaluation, which involves building, testing and refining theories of how an intervention works, provides an overall framework for this feasibility study. Realist hypotheses that describe how, in what contexts, and why QualDash is expected to provide benefit will be tested across five hospitals. A controlled interrupted time series analysis, using key MINAP and PICANet measures, will provide preliminary evidence of the impact of QualDash, while ethnographic observations and interviews over 12 months will provide initial insight into contexts and mechanisms that lead to those impacts. Feasibility outcomes include the extent to which MINAP and PICANet data are used, data completeness in the audits, and the extent to which participants perceive QualDash to be useful and express the intention to continue using it after the study period. Ethics and dissemination: The study has been approved by the University of Leeds School of Healthcare Research Ethics Committee. Study results will provide an initial understanding of how, in what contexts, and why quality dashboards lead to improvements in care quality. These will be disseminated to academic audiences, study participants, hospital IT departments and national audits. If the results show a trial is feasible, we will disseminate the QualDash software through a stepped wedge cluster randomised trial.
@article{wrro156818,
number = {2},
note = {{\copyright} Author(s) (or their employer(s)) 2020. Re-use permitted under CC BY. Published by BMJ. This is an open access article distributed in accordance with the Creative Commons Attribution 4.0 Unported (CC BY 4.0) license, which permits others to copy, redistribute, remix, transform and build upon this work for any purpose, provided the original work is properly cited, a link to the licence is given, and indication of whether changes were made. See: https://creativecommons.org/licenses/by/4.0/.},
doi = {10.1136/bmjopen-2019-033208},
year = {2020},
volume = {10},
journal = {BMJ Open},
month = {February},
publisher = {BMJ Publishing Group},
title = {How, in what contexts, and why do quality dashboards lead to improvements in care quality in acute hospitals? Protocol for a realist feasibility evaluation},
url = {https://eprints.whiterose.ac.uk/id/eprint/156818/},
author = {Randell, R and Alvarado, N and McVey, L and Greenhalgh, J and West, RM and Farrin, A and Gale, C and Parslow, R and Keen, J and Elshehaly, M and Ruddle, RA and Lake, J and Mamas, M and Feltbower, R and Dowding, D},
abstract = {Introduction: National audits are used to monitor care quality and safety and are anticipated to reduce unexplained variations in quality by stimulating quality improvement (QI). However, variation within and between providers in the extent of engagement with national audits means that the potential for national audit data to inform QI is not being realised. This study will undertake a feasibility evaluation of QualDash, a quality dashboard designed to support clinical teams and managers to explore data from two national audits, the Myocardial Ischaemia National Audit Project (MINAP) and the Paediatric Intensive Care Audit Network (PICANet).
Methods and analysis: Realist evaluation, which involves building, testing and refining theories of how an intervention works, provides an overall framework for this feasibility study. Realist hypotheses that describe how, in what contexts, and why QualDash is expected to provide benefit will be tested across five hospitals. A controlled interrupted time series analysis, using key MINAP and PICANet measures, will provide preliminary evidence of the impact of QualDash, while ethnographic observations and interviews over 12 months will provide initial insight into contexts and mechanisms that lead to those impacts. Feasibility outcomes include the extent to which MINAP and PICANet data are used, data completeness in the audits, and the extent to which participants perceive QualDash to be useful and express the intention to continue using it after the study period.
Ethics and dissemination: The study has been approved by the University of Leeds School of Healthcare Research Ethics Committee. Study results will provide an initial understanding of how, in what contexts, and why quality dashboards lead to improvements in care quality. These will be disseminated to academic audiences, study participants, hospital IT departments and national audits. If the results show a trial is feasible, we will disseminate the QualDash software through a stepped wedge cluster randomised trial.},
issn = {2044-6055}
}
3D reconstruction from anatomical slices allows anatomists to create three-dimensional depictions of real structures by tracing organs from sequences of cryosections. However, conventional user interfaces rely on single-user experiences and mouse-based input to create content for education or training purposes. In this work, we present Anatomy Studio, a collaborative Mixed Reality tool for virtual dissection that combines tablets with styli and see-through head-mounted displays to assist anatomists by easing manual tracing and exploring cryosection images. We contribute novel interaction techniques intended to promote spatial understanding and expedite manual segmentation. By using mid-air interactions and interactive surfaces, anatomists can easily access any cryosection and edit contours, while following other users' contributions. A user study including experienced anatomists and medical professionals, conducted in real working sessions, demonstrates that Anatomy Studio is appropriate and useful for 3D reconstruction. Results indicate that Anatomy Studio encourages closely-coupled collaborations and group discussion to achieve deeper insights.
@article{wrro194576,
publisher = {Elsevier},
title = {Anatomy Studio: A tool for virtual dissection through augmented 3D reconstruction},
month = {December},
journal = {Computers \& Graphics},
volume = {85},
pages = {74--84},
year = {2019},
doi = {10.1016/j.cag.2019.09.006},
note = {{\copyright} 2019 Elsevier Ltd. This is an author produced version of an article published in Computers \& Graphics. Uploaded in accordance with the publisher's self-archiving policy. This manuscript version is made available under the CC-BY-NC-ND 4.0 license
(http://creativecommons.org/licenses/by-nc-nd/4.0/.)},
url = {https://eprints.whiterose.ac.uk/id/eprint/194576/},
author = {Zorzal, ER and Sousa, M and Mendes, D and dos Anjos, RK and Medeiros, D and Paulo, SF and Rodrigues, P and Mendes, JJ and Delmas, V and Uhl, J-F and Mogorr{\'o}n, J and Jorge, JA and Lopes, DS},
abstract = {3D reconstruction from anatomical slices allows anatomists to create three-dimensional depictions of real structures by tracing organs from sequences of cryosections. However, conventional user interfaces rely on single-user experiences and mouse-based input to create content for education or training purposes. In this work, we present Anatomy Studio, a collaborative Mixed Reality tool for virtual dissection that combines tablets with styli and see-through head-mounted displays to assist anatomists by easing manual tracing and exploring cryosection images. We contribute novel interaction techniques intended to promote spatial understanding and expedite manual segmentation. By using mid-air interactions and interactive surfaces, anatomists can easily access any cryosection and edit contours, while following other users' contributions. A user study including experienced anatomists and medical professionals, conducted in real working sessions, demonstrates that Anatomy Studio is appropriate and useful for 3D reconstruction. Results indicate that Anatomy Studio encourages closely-coupled collaborations and group discussion to achieve deeper insights.},
keywords = {3D reconstruction; Collaboration; Medical image segmentation; Mixed reality; Tablet},
issn = {0097-8493}
}
A camera's shutter controls the light reaching the camera sensor. Different shutters lead to wildly different results, and are often used as a tool in movies for artistic purposes, e.g., they can indirectly control the effect of motion blur. However, a physical camera is limited to a single shutter setting at any given moment. ShutterApp enables users to define spatio-temporally-varying virtual shutters that go beyond the options available in real-world camera systems. A user provides a sparse set of annotations that define shutter functions at selected locations in key frames. From this input, our solution defines shutter functions for each pixel of the video sequence using a suitable interpolation technique, which are then employed to derive the output video. Our solution performs in real-time on commodity hardware. As a result, users can explore different options interactively, leading to a new level of expressiveness without having to rely on specialized hardware or laborious editing.
@article{wrro179724,
author = {NZ Salamon and M Billeter and E Eisemann},
pages = {675--683},
month = {October},
journal = {Computer Graphics Forum},
volume = {38},
title = {ShutterApp: Spatio-temporal Exposure Control for Videos},
publisher = {Wiley},
number = {7},
year = {2019},
doi = {10.1111/cgf.13870},
abstract = {A camera's shutter controls the light reaching the camera sensor. Different shutters lead to wildly different results, and are often used as a tool in movies for artistic purposes, e.g., they can indirectly control the effect of motion blur. However, a physical camera is limited to a single shutter setting at any given moment. ShutterApp enables users to define spatio-temporally-varying virtual shutters that go beyond the options available in real-world camera systems. A user provides a sparse set of annotations that define shutter functions at selected locations in key frames. From this input, our solution defines shutter functions for each pixel of the video sequence using a suitable interpolation technique, which are then employed to derive the output video. Our solution performs in real-time on commodity hardware. As a result, users can explore different options interactively, leading to a new level of expressiveness without having to rely on specialized hardware or laborious editing.},
url = {https://eprints.whiterose.ac.uk/id/eprint/179724/}
}
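To make the per-pixel virtual shutter concrete, here is a minimal Python sketch that applies an already-interpolated spatio-temporal shutter to a stack of frames. The (T, H, W) layout and the function name are illustrative assumptions, not the paper's implementation; the paper obtains the per-pixel shutter functions by interpolating sparse user annotations.

import numpy as np

def apply_virtual_shutter(frames, shutter):
    # frames:  (T, H, W) grayscale frames contributing to one output frame.
    # shutter: (T, H, W) per-pixel, per-time weights in [0, 1], assumed to
    #          have been interpolated from sparse annotations in key frames.
    weights = shutter.sum(axis=0)
    weights[weights == 0] = 1.0              # guard against division by zero
    # Weighted temporal average: a wide-open shutter produces motion blur,
    # a brief one freezes motion, and the mix can vary per pixel.
    return (frames * shutter).sum(axis=0) / weights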
Most researchers use a single method of mining to analyze event data. This paper uses case studies from two very different domains (electronic health records and cybersecurity) to investigate how researchers can gain breakthrough insights by combining multiple event mining methods in a visual analytics workflow. The aim of the health case study was to identify patterns of missing values, which was daunting because the 615 million missing values occurred in 43,219 combinations of fields. However, a workflow that involved exclusive set intersections (ESI), frequent itemset mining (FIM) and then two more ESI steps allowed us to identify that 82\% of the missing values were from just 244 combinations. The cybersecurity case study's aim was to understand users' behavior from logs that contained 300 types of action, gathered from 15,000 sessions and 1,400 users. Sequential frequent pattern mining (SFPM) and ESI highlighted some patterns in common, and others that only one method found. For the latter, SFPM stood out for its ability to detect action sequences that were buried within otherwise different sessions, and ESI detected subtle signals that were missed by SFPM. In summary, this paper demonstrates the importance of using multiple perspectives, complementary set mining methods and a diverse workflow when using visual analytics to analyze complex event data.
@misc{wrro147228,
pages = {61--65},
journal = {EuroVis Workshop on Visual Analytics (EuroVA) 2019},
month = {June},
title = {Visual Analytics of Event Data using Multiple Mining Methods},
publisher = {The Eurographics Association},
note = {{\copyright} 2019 by the Eurographics Association. This is an author produced version of a conference paper published in EuroVis Workshop on Visual Analytics (EuroVA) 2019. Uploaded in accordance with the publisher's self-archiving policy. },
booktitle = {EuroVis Workshop on Visual Analytics (EuroVA) 2019},
doi = {10.2312/eurova.20191126},
editor = {C Turkay and T von Landesberger},
year = {2019},
abstract = {Most researchers use a single method of mining to analyze event data. This paper uses case studies from two very different domains (electronic health records and cybersecurity) to investigate how researchers can gain breakthrough insights by combining multiple event mining methods in a visual analytics workflow. The aim of the health case study was to identify patterns of missing values, which was daunting because the 615 million missing values occurred in 43,219 combinations of fields. However, a workflow that involved exclusive set intersections (ESI), frequent itemset mining (FIM) and then two more ESI steps allowed us to identify that 82\% of the missing values were from just 244 combinations. The cybersecurity case study's aim was to understand users' behavior from logs that contained 300 types of action, gathered from 15,000 sessions and 1,400 users. Sequential frequent pattern mining (SFPM) and ESI highlighted some patterns in common, and others that only one method found. For the latter, SFPM stood out for its ability to detect action sequences that were buried within otherwise different sessions, and ESI detected subtle signals that were missed by SFPM. In summary, this paper demonstrates the importance of using multiple perspectives, complementary set mining methods and a diverse workflow when using visual analytics to analyze complex event data.},
author = {Adnan, M and Nguyen, PH and Ruddle, RA and Turkay, C},
isbn = {978-3-03868-087-1},
url = {https://eprints.whiterose.ac.uk/id/eprint/147228/}
}
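As an illustration of the exclusive-set-intersection (ESI) step described above, the short Python sketch below groups toy records by the exact combination of fields that are missing and ranks the combinations by frequency; the record layout is invented for illustration, and the FIM and SFPM steps of the workflow are omitted.

from collections import Counter

# Toy records: field name -> value, with None marking a missing value.
records = [
    {"age": 34,   "bp": None, "hr": None},
    {"age": None, "bp": 120,  "hr": 70},
    {"age": 51,   "bp": None, "hr": None},
]

# ESI: group records by the exact combination of missing fields, then
# rank combinations by how many records exhibit each of them.
combos = Counter(
    frozenset(k for k, v in rec.items() if v is None) for rec in records
)
for combo, count in combos.most_common():
    print(sorted(combo), count)    # e.g. ['bp', 'hr'] occurs twice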
In this design study, we present a visualization technique that segments patients' histories instead of treating them as raw event sequences, aggregates the segments using criteria such as the whole history or treatment combinations, and then visualizes the aggregated segments as static dashboards that are arranged in a dashboard network to show longitudinal changes. The static dashboards were developed in nine iterations, to show 15 important attributes from the patients' histories. The final design was evaluated with five non-experts, five visualization experts and four medical experts, who successfully used it to gain an overview of a 2,000 patient dataset, and to make observations about longitudinal changes and differences between two cohorts. The research represents a step-change in the detail of large-scale data that may be successfully visualized using dashboards, and provides guidance about how the approach may be generalized.
@article{wrro128739,
doi = {10.1109/TVCG.2018.2803829},
year = {2019},
note = {{\copyright} 2018, IEEE. This is an author produced version of a paper published in IEEE Transactions on Visualization and Computer Graphics. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works. Uploaded in accordance with the publisher's self-archiving policy.},
number = {3},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
title = {Using Dashboard Networks to Visualize Multiple Patient Histories: A Design Study on Post-operative Prostate Cancer},
volume = {25},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {March},
pages = {1615--1628},
abstract = {In this design study, we present a visualization technique that segments patients' histories instead of treating them as raw event sequences, aggregates the segments using criteria such as the whole history or treatment combinations, and then visualizes the aggregated segments as static dashboards that are arranged in a dashboard network to show longitudinal changes. The static dashboards were developed in nine iterations, to show 15 important attributes from the patients' histories. The final design was evaluated with five non-experts, five visualization experts and four medical experts, who successfully used it to gain an overview of a 2,000 patient dataset, and to make observations about longitudinal changes and differences between two cohorts. The research represents a step-change in the detail of large-scale data that may be successfully visualized using dashboards, and provides guidance about how the approach may be generalized.},
keywords = {Information Visualization, Visual Analytics, Multivariate Data Visualization, Electronic Health Care Records, Medical Data Analysis, Prostate Cancer Disease, Design Study, User Study, Evaluation, Static Dashboard, Dashboard Network},
issn = {1077-2626},
url = {https://eprints.whiterose.ac.uk/id/eprint/128739/},
author = {Bernard, J and Sessler, D and Kohlhammer, J and Ruddle, RA}
}
Descriptive statistics are typically presented as text, but that quickly becomes overwhelming when datasets contain many variables or analysts need to compare multiple datasets. Visualization offers a solution, but is rarely used apart from to show cardinalities (e.g., the \% missing values) or distributions of a small set of variables. This paper describes dataset- and variable-centric designs for visualizing three categories of descriptive statistic (cardinalities, distributions and patterns), which scale to more than 100 variables, and use multiple channels to encode important semantic differences (e.g., zero vs. 1+ missing values). We evaluated our approach using large (multi-million record) primary and secondary care datasets. The miniature visualizations provided our users with a variety of important insights, including differences in character patterns that indicate data validation issues, missing values for a variable that should always be complete, and inconsistent encryption of patient identifiers. Finally, we highlight the need for research into methods of identifying anomalies in the distributions of dates in health data.
@misc{wrro140847,
note = {This is an author produced version of a paper accepted for publication in the Proceedings of the 12th International Joint Conference on Biomedical Engineering Systems and Technologies.},
year = {2019},
booktitle = {HEALTHINF 2019},
doi = {10.5220/0007354802300238},
pages = {230--238},
journal = {Proceedings of the 12th International Joint Conference on Biomedical Engineering Systems and Technologies - Volume 5: HEALTHINF},
title = {Using Miniature Visualizations of Descriptive Statistics to Investigate the Quality of Electronic Health Records},
publisher = {SciTePress},
abstract = {Descriptive statistics are typically presented as text, but that quickly becomes overwhelming when datasets contain many variables or analysts need to compare multiple datasets. Visualization offers a solution, but is rarely used apart from to show cardinalities (e.g., the \% missing values) or distributions of a small set of variables. This paper describes dataset- and variable-centric designs for visualizing three categories of descriptive statistic (cardinalities, distributions and patterns), which scale to more than 100 variables, and use multiple channels to encode important semantic differences (e.g., zero vs. 1+ missing values). We evaluated our approach using large (multi-million record) primary and secondary care datasets. The miniature visualizations provided our users with a variety of important insights, including differences in character patterns that indicate data validation issues, missing values for a variable that should always be complete, and inconsistent encryption of patient identifiers. Finally, we highlight the need for research into methods of identifying anomalies in the distributions of dates in health data.},
keywords = {Data Visualization; Electronic Health Records; Data Quality},
url = {https://eprints.whiterose.ac.uk/id/eprint/140847/},
isbn = {978-989-758-353-7},
author = {Ruddle, R and Hall, M}
}
The use of videos as input for a rendering process (video-based rendering, VBR) has recently attracted greater interest, adding many new challenges as well as solutions to classical image-based rendering (IBR). Although the general goal of VBR is shared by different applications, approaches differ widely regarding methodology, setup, and data representation. Previous attempts to classify VBR techniques used external aspects as classification parameters, providing little insight into the inner similarities between works, and not defining clear lines of research. We found that the chosen navigation paradigm for a VBR application is ultimately the deciding factor for several details of a VBR technique. Based on this statement, this article presents the state of the art in video-based rendering and its relations and dependencies to the data representations and image processing techniques used. We present a novel taxonomy for VBR applications with the navigation paradigm being the topmost classification attribute, and methodological aspects further down in the hierarchy. Different view generation methodologies, capture baselines and data representations found in the body of work are described, and their relation to the chosen classification scheme is discussed.
@article{wrro194581,
publisher = {Elsevier},
title = {A navigation paradigm driven classification for video-based rendering techniques},
journal = {Computers \& Graphics},
month = {December},
volume = {77},
pages = {205--216},
year = {2018},
doi = {10.1016/j.cag.2018.10.017},
note = {{\copyright} 2018 Published by Elsevier Ltd. This is an author produced version of an article published in Computers \& Graphics. Uploaded in accordance with the publisher's self-archiving policy. This manuscript version is made available under the CC-BY-NC-ND 4.0 license http://creativecommons.org/licenses/by-nc-nd/4.0/.},
keywords = {Video-based rendering; Data representation; Application; Navigation paradigm; Free viewpoint video},
issn = {0097-8493},
abstract = {The use of videos as input for a rendering process (video-based rendering, VBR) has recently attracted greater interest, adding many new challenges as well as solutions to classical image-based rendering (IBR). Although the general goal of VBR is shared by different applications, approaches differ widely regarding methodology, setup, and data representation. Previous attempts to classify VBR techniques used external aspects as classification parameters, providing little insight into the inner similarities between works, and not defining clear lines of research. We found that the chosen navigation paradigm for a VBR application is ultimately the deciding factor for several details of a VBR technique. Based on this statement, this article presents the state of the art in video-based rendering and its relations and dependencies to the data representations and image processing techniques used. We present a novel taxonomy for VBR applications with the navigation paradigm being the topmost classification attribute, and methodological aspects further down in the hierarchy. Different view generation methodologies, capture baselines and data representations found in the body of work are described, and their relation to the chosen classification scheme is discussed.},
author = {dos Anjos, RK and Pereira, J and Gaspar, J},
url = {https://eprints.whiterose.ac.uk/id/eprint/194581/}
}
Navigating 3-D parameter domains, such as color and orientation of an object, is a common task performed in most computer graphics applications. Although 1-D sliders are the most common interface for browsing such domains, they provide a tedious and difficult user experience that hampers finding desirable visual solutions. We present the Rhomb-i slider, a novel and visually enriching tile-based interface to navigate arbitrary 3-D parameter domains. Contrary to 1-D sliders, the Rhomb-i slider supports a sketch-based interface that gives simultaneous access to up to two parameters. We conducted a usability study to ascertain whether the proposed Rhomb-i slider is a more natural interface compared to 1-D sliders and other commonly used widgets for different 3-D parameter domains: HSV color space, super-shape curves, and rotation of a 3-D object. On the one hand, qualitative feedback and performance measures reveal that Rhomb-i sliders have similar results when compared to conventional HSV color interfaces, and are the preferred interface to efficiently explore the super-shapes parameter domain. On the other hand, Rhomb-i proved to be a less efficient and effective interface for rotating a 3-D object, thus paving the way to new design explorations regarding this tile-based interface.
@article{wrro194584,
note = {{\copyright} 2018 Elsevier Ltd. This is an author produced version of an article published in the International Journal of Human-Computer Studies. Uploaded in accordance with the publisher's self-archiving policy.This manuscript version is made available under the CC-BY-NC-ND 4.0 license http://creativecommons.org/licenses/by-nc-nd/4.0/.},
year = {2018},
doi = {10.1016/j.ijhcs.2018.05.005},
pages = {1--13},
month = {October},
journal = {International Journal of Human-Computer Studies},
volume = {118},
title = {Assessing the usability of tile-based interfaces to visually navigate 3-D parameter domains},
publisher = {Elsevier},
url = {https://eprints.whiterose.ac.uk/id/eprint/194584/},
author = {Lopes, DS and dos Anjos, RK and Jorge, JA},
abstract = {Navigating 3-D parameter domains, such as color and orientation of an object, is a common task performed in most computer graphics applications. Although 1-D sliders are the most common interface for browsing such domains, they provide a tedious and difficult user experience that hampers finding desirable visual solutions. We present the Rhomb-i slider, a novel and visually enriching tile-based interface to navigate arbitrary 3-D parameter domains. Contrary to 1-D sliders, the Rhomb-i slider supports a sketch-based interface that gives simultaneous access to up to two parameters. We conducted a usability study to ascertain whether the proposed Rhomb-i slider is a more natural interface compared to 1-D sliders and other commonly used widgets for different 3-D parameter domains: HSV color space, super-shape curves, and rotation of a 3-D object. On the one hand, qualitative feedback and performance measures reveal that Rhomb-i sliders have similar results when compared to conventional HSV color interfaces, and are the preferred interface to efficiently explore the super-shapes parameter domain. On the other hand, Rhomb-i proved to be a less efficient and effective interface for rotating a 3-D object, thus paving the way to new design explorations regarding this tile-based interface.},
issn = {1071-5819},
keywords = {Sliders; Rhombille tiling; HSV color space; Super-shape curves; 3-D rotation}
}
Current state-of-the-art point cloud visualization techniques have shortcomings when dealing with sparse and less accurate data or close-up interactions. In this paper, we present a visualization technique called stroke-based splatting, which applies concepts of stroke-based rendering to surface-aligned splatting, allowing for better shape perception at lower resolutions and close-ups. We create a painterly depiction of the data with an impressionistic aesthetic, which is a metaphor the user is culturally trained to recognize, thus attributing higher quality to the visualization. This is achieved by shaping each object-aligned splat as a brush stroke, and orienting it according to globally coherent tangent vectors from the Householder formula, creating a painterly depiction of the scanned cloud. Each splat is sized according to a color-based clustering analysis of the data, ensuring the consistency of brush strokes within neighborhood areas. By controlling brush shape generation parameters and blending factors between neighboring splats, the user is able to simulate different painting styles in real time. We have tested our method with data sets captured by commodity laser scanners as well as publicly available high-resolution point clouds, both having highly interactive frame rates in all cases. In addition, a user study was conducted comparing our approach to state-of-the-art point cloud visualization techniques. Users considered stroke-based splatting a valuable technique as it provides a higher or similar visual quality to current approaches.
@article{wrro194583,
year = {2018},
doi = {10.1007/s00371-017-1420-7},
note = {{\copyright} The Author(s) 2017. This is an open access article published under the terms of the Creative Commons Attribution License (CC-BY 4.0), which permits unrestricted use, distribution and reproduction in any medium, provided the original work is properly cited.},
number = {10},
publisher = {Springer},
title = {Stroke-based splatting: an efficient multi-resolution point cloud visualization technique},
journal = {The Visual Computer},
month = {October},
volume = {34},
pages = {1383--1397},
abstract = {Current state-of-the-art point cloud visualization techniques have shortcomings when dealing with sparse and less accurate data or close-up interactions. In this paper, we present a visualization technique called stroke-based splatting, which applies concepts of stroke-based rendering to surface-aligned splatting, allowing for better shape perception at lower resolutions and close-ups. We create a painterly depiction of the data with an impressionistic aesthetic, which is a metaphor the user is culturally trained to recognize, thus attributing higher quality to the visualization. This is achieved by shaping each object-aligned splat as a brush stroke, and orienting it according to globally coherent tangent vectors from the Householder formula, creating a painterly depiction of the scanned cloud. Each splat is sized according to a color-based clustering analysis of the data, ensuring the consistency of brush strokes within neighborhood areas. By controlling brush shape generation parameters and blending factors between neighboring splats, the user is able to simulate different painting styles in real time. We have tested our method with data sets captured by commodity laser scanners as well as publicly available high-resolution point clouds, both having highly interactive frame rates in all cases. In addition, a user study was conducted comparing our approach to state-of-the-art point cloud visualization techniques. Users considered stroke-based splatting a valuable technique as it provides a higher or similar visual quality to current approaches.},
issn = {0178-2789},
keywords = {Householder formula; Non-photorealistic rendering; Point cloud visualization; Splatting},
url = {https://eprints.whiterose.ac.uk/id/eprint/194583/},
author = {dos Anjos, RK and Ribeiro, CS and Lopes, DS and Pereira, JM}
}
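The Householder construction named in the abstract above can be stated compactly. The following Python sketch derives a tangent pair from a unit normal via a Householder reflection, the standard form of that formula; the splat sizing and color-based clustering of the paper are omitted, and the function name is our own.

import numpy as np

def householder_tangents(normal):
    # Householder reflection H = I - 2 v v^T / (v^T v) with v = n + sign(n_x) e_x.
    # H is orthogonal and its first column equals -sign(n_x) * n, so the other
    # two columns are unit vectors perpendicular to n: the splat's tangent pair.
    # Applying one fixed rule everywhere gives nearby normals nearby tangents,
    # which is what makes the resulting brush-stroke orientations coherent.
    n = np.asarray(normal, dtype=float)
    v = n.copy()
    v[0] += np.copysign(1.0, n[0])
    H = np.eye(3) - 2.0 * np.outer(v, v) / v.dot(v)
    return H[:, 1], H[:, 2]

t1, t2 = householder_tangents([0.0, 0.0, 1.0])   # e.g. (0, 1, 0) and (-1, 0, 0)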
We present a taxonomy-driven approach to requirements specification in a large-scale project setting, drawing on our work to develop visualization dashboards for improving the quality of healthcare. Our aim is to overcome some of the limitations of the qualitative methods that are typically used for requirements analysis. When applied alone, methods like interviews fall short in identifying the full set of functionalities that a visualization system should support. We present a five-stage pipeline to structure user task elicitation and analysis around well-established taxonomic dimensions, and make the following contributions: (i) criteria for selecting dimensions from the large body of task taxonomies in the literature, (ii) use of three particular dimensions (granularity, type cardinality and target) to create materials for a requirements analysis workshop with domain experts, (iii) a method for characterizing the task space that was produced by the experts in the workshop, (iv) a decision tree that partitions that space and maps it to visualization design alternatives, and (v) validating our approach by testing the decision tree against new tasks that were collected through interviews with further domain experts.
@misc{wrro136486,
journal = {Proceedings of the IEEE VIS Workshop on Evaluation and Beyond - Methodological Approaches for Visualization (BELIV)},
month = {October},
title = {From Taxonomy to Requirements: A Task Space Partitioning Approach},
booktitle = {BELIV Workshop 2018},
publisher = {IEEE},
year = {2018},
url = {https://beliv-workshop.github.io/schedule.html},
author = {Elshehal, M and Alvarado, N and McVey, L and Randell, R and Mamas, M and Ruddle, RA},
abstract = {We present a taxonomy-driven approach to requirements specification in a large-scale project setting, drawing on our work to develop visualization dashboards for improving the quality of healthcare. Our aim is to overcome some of the limitations of the qualitative methods that are typically used for requirements analysis. When applied alone, methods like interviews fall short in identifying the full set of functionalities that a visualization system should support. We present a five-stage pipeline to structure user task elicitation and analysis around well-established taxonomic dimensions, and make the following contributions: (i) criteria for selecting dimensions from the large body of task taxonomies in the literature, (ii) use of three particular dimensions (granularity, type cardinality and target) to create materials for a requirements analysis workshop with domain experts, (iii) a method for characterizing the task space that was produced by the experts in the workshop, (iv) a decision tree that partitions that space and maps it to visualization design alternatives, and (v) validating our approach by testing the decision tree against new tasks that were collected through interviews with further domain experts.},
keywords = {Human-centered computing, Visualization, Visualization design and evaluation methods}
}
This paper explores how a set-based visual analytics approach could be useful for analyzing customers' shopping behavior, and makes three main contributions. First, it describes the scale and characteristics of a real-world retail dataset from a major supermarket. Second, it presents a scalable visual analytics workflow to quickly identify patterns in shopping behavior. To assess the workflow, we conducted a case study that used data from four convenience stores and provides several insights about customers' shopping behavior. Third, from our experience with analyzing real-world retail data and comments made by our industry partner, we outline four research challenges for visual analytics to tackle large set intersection problems.
@misc{wrro131939,
journal = {Proceedings of the EuroVis Workshop on Visual Analytics (EuroVA18)},
month = {June},
volume = {EuroVA},
title = {A set-based visual analytics approach to analyze retail data},
publisher = {The Eurographics Association},
note = {(c) 2018, The Author(s). Eurographics Proceedings (c) 2018, The Eurographics Association. Uploaded in accordance with the publisher's self-archiving policy. },
year = {2018},
booktitle = {9th International EuroVis Workshop on Visual Analytics},
doi = {10.2312/eurova.20181110},
abstract = {This paper explores how a set-based visual analytics approach could be useful for analyzing customers' shopping behavior, and makes three main contributions. First, it describes the scale and characteristics of a real-world retail dataset from a major supermarket. Second, it presents a scalable visual analytics workflow to quickly identify patterns in shopping behavior. To assess the workflow, we conducted a case study that used data from four convenience stores and provides several insights about customers' shopping behavior. Third, from our experience with analyzing real-world retail data and comments made by our industry partner, we outline four research challenges for visual analytics to tackle large set intersection problems.},
author = {Adnan, M and Ruddle, R},
isbn = {978-3-03868-064-2},
url = {https://diglib.eg.org/handle/10.2312/eurova20181110}
}
The aim of the PETMiner software is to reduce the time and monetary cost of analysing petrophysical data that is obtained from reservoir sample cores. Analysis of these data requires tacit knowledge to fill 'gaps' so that predictions can be made for incomplete data. Through discussions with 30 industry and academic specialists, we identified three analysis use cases that exemplified the limitations of current petrophysics analysis tools. We used those use cases to develop nine core requirements for PETMiner, which is innovative because of its ability to display detailed images of the samples as data points, directly plot multiple sample properties and derived measures for comparison, and substantially reduce interaction cost. An 11-month evaluation demonstrated benefits across all three use cases by allowing a consultant to: (1) generate more accurate reservoir flow models, (2) discover a previously unknown relationship between one easy-to-measure property and another that is costly, and (3) make a 100-fold reduction in the time required to produce plots for a report.
@article{wrro113580,
volume = {24},
month = {May},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {1728--1741},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
title = {PETMiner - A visual analysis tool for petrophysical properties of core sample data},
note = {{\copyright} 2017 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/ republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
number = {5},
doi = {10.1109/TVCG.2017.2682865},
year = {2018},
abstract = {The aim of the PETMiner software is to reduce the time and monetary cost of analysing petrophysical data that is obtained from reservoir sample cores. Analysis of these data requires tacit knowledge to fill 'gaps' so that predictions can be made for incomplete data. Through discussions with 30 industry and academic specialists, we identified three analysis use cases that exemplified the limitations of current petrophysics analysis tools. We used those use cases to develop nine core requirements for PETMiner, which is innovative because of its ability to display detailed images of the samples as data points, directly plot multiple sample properties and derived measures for comparison, and substantially reduce interaction cost. An 11-month evaluation demonstrated benefits across all three use cases by allowing a consultant to: (1) generate more accurate reservoir flow models, (2) discover a previously unknown relationship between one easy-to-measure property and another that is costly, and (3) make a 100-fold reduction in the time required to produce plots for a report.},
issn = {1077-2626},
keywords = {Visualization Systems and Software; Information Visualization; Design Study},
url = {https://doi.org/10.1109/TVCG.2017.2682865},
author = {Harrison, DG and Efford, ND and Fisher, QJ and Ruddle, RA}
}
Rendering vector maps is a key challenge for high-quality geographic visualization systems. In this paper, we present a novel approach to visualize vector maps over detailed terrain models in a pixel-precise way. Our method proposes a deferred line rendering technique to display vector maps directly in a screen-space shading stage over the 3D terrain visualization. Due to the absence of traditional geometric polygonal rendering, our algorithm is able to outperform conventional vector map rendering algorithms for geographic information systems, and supports advanced line anti-aliasing as well as slope distortion correction. Furthermore, our deferred line rendering enables interactively customizable advanced vector styling methods as well as a tool for interactive pixel-based editing operations.
@article{wrro169264,
number = {1},
note = {{\copyright} 2017 The Authors Computer Graphics Forum {\copyright} 2017 The Eurographics Association and John Wiley \& Sons Ltd.
This is the peer reviewed version of the following article: Th{\"o}ny, M., Billeter, M. and Pajarola, R. (2018), Large-Scale Pixel-Precise Deferred Vector Maps. Computer Graphics Forum, 37: 338-349, which has been published in final form at https://doi.org/10.1111/cgf.13294. This article may be used for non-commercial purposes in accordance with Wiley Terms and Conditions for Use of Self-Archived Versions},
year = {2018},
doi = {10.1111/cgf.13294},
month = {February},
journal = {Computer Graphics Forum},
volume = {37},
pages = {338--349},
author = {M Th{\"o}ny and M Billeter and R Pajarola},
publisher = {Wiley},
title = {Large-Scale Pixel-Precise Deferred Vector Maps},
url = {https://eprints.whiterose.ac.uk/id/eprint/169264/},
keywords = {real-time rendering; rendering; scientific visualization; visualization; I.3.3 [Computer Graphics]: Picture/Image Generation{--}Line and curve generation},
abstract = {Rendering vector maps is a key challenge for high-quality geographic visualization systems. In this paper, we present a novel approach to visualize vector maps over detailed terrain models in a pixel-precise way. Our method proposes a deferred line rendering technique to display vector maps directly in a screen-space shading stage over the 3D terrain visualization. Due to the absence of traditional geometric polygonal rendering, our algorithm is able to outperform conventional vector map rendering algorithms for geographic information systems, and supports advanced line anti-aliasing as well as slope distortion correction. Furthermore, our deferred line rendering enables interactively customizable advanced vector styling methods as well as a tool for interactive pixel-based editing operations.}
}
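As a hint of what a screen-space line test looks like, the Python sketch below evaluates per-pixel coverage of a single segment with a one-pixel anti-aliased edge. The names and the exact falloff are illustrative assumptions, not the paper's shader; a deferred pass would evaluate something of this shape per pixel against nearby segments instead of rasterising line geometry.

import numpy as np

def segment_coverage(px, a, b, half_width):
    # Distance from the pixel centre px to the closest point on segment a-b,
    # computed directly in screen space.
    ab = b - a
    t = np.clip(np.dot(px - a, ab) / np.dot(ab, ab), 0.0, 1.0)
    d = np.linalg.norm(px - (a + t * ab))
    # Full coverage inside the half-width, fading to zero over one pixel.
    return float(np.clip(half_width + 0.5 - d, 0.0, 1.0))

c = segment_coverage(np.array([10.0, 5.0]),
                     np.array([0.0, 0.0]), np.array([20.0, 10.0]), 1.5)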
The rising quantity and complexity of data creates a need to design and optimize data processing pipelines - the set of data processing steps, parameters and algorithms that perform operations on the data. Visualization can support this process but, although there are many examples of systems for visual parameter analysis, there remains a need to systematically assess users' requirements and match those requirements to exemplar visualization methods. This article presents a new characterization of the requirements for pipeline design and optimization. This characterization is based on both a review of the literature and first-hand assessment of eight application case studies. We also match these requirements with exemplar functionality provided by existing visualization tools. Thus, we provide end-users and visualization developers with a way of identifying functionality that addresses data processing problems in an application. We also identify seven future challenges for visualization research that are not met by the capabilities of today's systems.
@article{wrro104078,
note = {(c) 2016, IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/ republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
number = {8},
year = {2017},
doi = {10.1109/TVCG.2016.2603178},
month = {August},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {23},
pages = {2028--2041},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
title = {Visualization system requirements for data processing pipeline design and optimization},
author = {von Landesberger, T and Fellner, DW and Ruddle, RA},
url = {http://dx.doi.org/10.1109/TVCG.2016.2603178},
keywords = {Visualization systems, requirement analysis, data processing pipelines},
issn = {1077-2626},
abstract = {The rising quantity and complexity of data creates a need to design and optimize data processing pipelines - the set of data processing steps, parameters and algorithms that perform operations on the data. Visualization can support this process but, although there are many examples of systems for visual parameter analysis, there remains a need to systematically assess users' requirements and match those requirements to exemplar visualization methods. This article presents a new characterization of the requirements for pipeline design and optimization. This characterization is based on both a review of the literature and first-hand assessment of eight application case studies. We also match these requirements with exemplar functionality provided by existing visualization tools. Thus, we provide end-users and visualization developers with a way of identifying functionality that addresses data processing problems in an application. We also identify seven future challenges for visualization research that are not met by the capabilities of today's systems.}
}
Isosurfaces are fundamental geometrical objects for the analysis and visualization of volumetric scalar fields. Recent work has generalized them to bivariate volumetric fields with fiber surfaces, the pre-image of polygons in range space. However, the existing algorithm for their computation is approximate, and is limited to closed polygons. Moreover, its runtime performance does not allow instantaneous updates of the fiber surfaces upon user edits of the polygons. Overall, these limitations prevent a reliable and interactive exploration of the space of fiber surfaces. This paper introduces the first algorithm for the exact computation of fiber surfaces in tetrahedral meshes. It assumes no restriction on the topology of the input polygon, handles degenerate cases and better captures sharp features induced by polygon bends. The algorithm also allows visualization of individual fibers on the output surface, better illustrating their relationship with data features in range space. To enable truly interactive exploration sessions, we further improve the runtime performance of this algorithm. In particular, we show that it is trivially parallelizable and that it scales nearly linearly with the number of cores. Further, we study acceleration data-structures both in geometrical domain and range space and we show how to generalize interval trees used in isosurface extraction to fiber surface extraction. Experiments demonstrate the superiority of our algorithm over previous work, both in terms of accuracy and running time, with up to two orders of magnitude speedups. This improvement enables interactive edits of range polygons with instantaneous updates of the fiber surface for exploration purposes. A VTK-based reference implementation is provided as additional material to reproduce our results.
@article{wrro100067,
doi = {10.1109/TVCG.2016.2570215},
year = {2017},
number = {7},
note = {(c) 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/ republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
title = {Fast and Exact Fiber Surfaces for Tetrahedral Meshes},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
pages = {1782--1795},
volume = {23},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {July},
url = {https://eprints.whiterose.ac.uk/id/eprint/100067/},
author = {Klacansky, P and Tierny, J and Carr, H and Geng, Z},
abstract = {Isosurfaces are fundamental geometrical objects for the analysis and visualization of volumetric scalar fields. Recent work has generalized them to bivariate volumetric fields with fiber surfaces, the pre-image of polygons in range space. However, the existing algorithm for their computation is approximate, and is limited to closed polygons. Moreover, its runtime performance does not allow instantaneous updates of the fiber surfaces upon user edits of the polygons. Overall, these limitations prevent a reliable and interactive exploration of the space of fiber surfaces. This paper introduces the first algorithm for the exact computation of fiber surfaces in tetrahedral meshes. It assumes no restriction on the topology of the input polygon, handles degenerate cases and better captures sharp features induced by polygon bends. The algorithm also allows visualization of individual fibers on the output surface, better illustrating their relationship with data features in range space. To enable truly interactive exploration sessions, we further improve the runtime performance of this algorithm. In particular, we show that it is trivially parallelizable and that it scales nearly linearly with the number of cores. Further, we study acceleration data-structures both in geometrical domain and range space and we show how to generalize interval trees used in isosurface extraction to fiber surface extraction. Experiments demonstrate the superiority of our algorithm over previous work, both in terms of accuracy and running time, with up to two orders of magnitude speedups. This improvement enables interactive edits of range polygons with instantaneous updates of the fiber surface for exploration purposes. A VTK-based reference implementation is provided as additional material to reproduce our results.},
keywords = {Bivariate Data, Data Segmentation, Data Analysis, Isosurfaces, Continuous Scatterplot},
issn = {1077-2626}
}
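For context, the approximate baseline that this exact algorithm improves on reduces fiber surface extraction to a level set of a range-space distance field. The Python sketch below computes that per-vertex distance from the bivariate values to a polyline of control points; thresholding it at a small epsilon yields a thin shell around the fiber surface. Names and the (N,)/(K, 2) layouts are our own illustrative assumptions.

import numpy as np

def range_distance(f, g, polyline):
    # f, g:     (N,) samples of the two scalar fields at the mesh vertices.
    # polyline: (K, 2) polygon control points in range space.
    # Returns the per-vertex distance from (f_i, g_i) to the polyline.
    pts = np.stack([f, g], axis=1)
    best = np.full(len(pts), np.inf)
    for a, b in zip(polyline[:-1], polyline[1:]):     # each polygon edge
        ab = b - a
        t = np.clip((pts - a) @ ab / (ab @ ab), 0.0, 1.0)
        closest = a + t[:, None] * ab                 # nearest point on edge
        best = np.minimum(best, np.linalg.norm(pts - closest, axis=1))
    return best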
This research addresses the general topic of 'keeping found things found' by investigating difficulties people encounter when revisiting webpages, and designing and evaluating a novel tool that addresses those difficulties. The research focused on occasional revisits-webpages that people have previously visited on only one day, a week or more ago (i.e. neither frequently nor recently). A 3-month logging study was combined with a laboratory experiment to identify 10 underlying causes of participants' revisiting failure. Overall, 61\% of the failures occurred when a webpage had originally been accessed via search results, was on a topic a participant often looked at or was on a known but large website. Then, we designed a novel visual Web history tool to address the causes of failure and implemented it as a Firefox add-on. The tool was evaluated in a 3-month field study, helped participants succeed on 96\% of revisits, and was also used by some participants to review and reminisce about their 'travels' online. Revised versions of the tool have been publicly released as the Firefox add-on MyWebSteps.
@article{wrro110716,
pages = {530--551},
journal = {Interacting with Computers},
month = {July},
volume = {29},
title = {MyWebSteps: Aiding Revisiting with a Visual Web History},
publisher = {Oxford University Press},
number = {4},
note = {{\copyright} The Author 2017. Published by Oxford University Press on behalf of The British Computer Society. This is a pre-copyedited, author-produced PDF of an article accepted for publication in Interacting with Computers following peer review. The version of record Trien V. Do, Roy A. Ruddle; MyWebSteps: Aiding Revisiting with a Visual Web History. Interact Comput 2017 1-22. doi: 10.1093/iwc/iww038 is available online at: https://doi.org/10.1093/iwc/iww038.},
year = {2017},
doi = {10.1093/iwc/iww038},
keywords = {laboratory experiments, field studies, user centered design, scenario-based design, visualization systems and tools, personalization (WWW)},
issn = {0953-5438},
abstract = {This research addresses the general topic of 'keeping found things found' by investigating difficulties people encounter when revisiting webpages, and designing and evaluating a novel tool that addresses those difficulties. The research focused on occasional revisits-webpages that people have previously visited on only one day, a week or more ago (i.e. neither frequently nor recently). A 3-month logging study was combined with a laboratory experiment to identify 10 underlying causes of participants' revisiting failure. Overall, 61\% of the failures occurred when a webpage had originally been accessed via search results, was on a topic a participant often looked at or was on a known but large website. Then, we designed a novel visual Web history tool to address the causes of failure and implemented it as a Firefox add-on. The tool was evaluated in a 3-month field study, helped participants succeed on 96\% of revisits, and was also used by some participants to review and reminisce about their 'travels' online. Revised versions of the tool have been publicly released as the Firefox add-on MyWebSteps.},
author = {Do, TV and Ruddle, RA},
url = {https://doi.org/10.1093/iwc/iww038}
}
We propose the first system for live dynamic augmentation of human faces. Using projector-based illumination, we alter the appearance of human performers during novel performances. The key challenge of live augmentation is latency - an image is generated according to a specific pose, but is displayed on a different facial configuration by the time it is projected. Therefore, our system aims at reducing latency during every step of the process, from capture, through processing, to projection. Using infrared illumination, an optically and computationally aligned high-speed camera detects facial orientation as well as expression. The estimated expression blendshapes are mapped onto a lower dimensional space, and the facial motion and non-rigid deformation are estimated, smoothed and predicted through adaptive Kalman filtering. Finally, the desired appearance is generated by interpolating precomputed offset textures according to time, global position, and expression. We have evaluated our system through an optimized CPU and GPU prototype, and demonstrated successful low latency augmentation for different performers and performances with varying facial play and motion speed. In contrast to existing methods, the presented system is the first method which fully supports dynamic facial projection mapping without the requirement of any physical tracking markers and incorporates facial expressions.
@article{wrro169265,
year = {2017},
doi = {10.1111/cgf.13128},
number = {2},
note = {{\copyright} 2017 The Author(s) Computer Graphics Forum {\copyright} 2017 The Eurographics Association and John Wiley \& Sons Ltd. Published by John Wiley \& Sons Ltd.
This is the peer reviewed version of the following article: Bermano, A.H., Billeter, M., Iwai, D. and Grundh{\"o}fer, A. (2017), Makeup Lamps: Live Augmentation of Human Faces via Projection. Computer Graphics Forum, 36: 311-323. , which has been published in final form at https://doi.org/10.1111/cgf.13128. This article may be used for non-commercial purposes in accordance with Wiley Terms and Conditions for Use of Self-Archived Versions.},
publisher = {Wiley},
title = {Makeup Lamps: Live Augmentation of Human Faces via Projection},
journal = {Computer Graphics Forum},
month = {May},
volume = {36},
author = {AH Bermano and M Billeter and D Iwai and A Grundh{\"o}fer},
pages = {311--323},
url = {https://eprints.whiterose.ac.uk/id/eprint/169265/},
keywords = {Categories and Subject Descriptors (according to ACM CCS); H.5.1 [HCI]: Multimedia Information Systems{--}Artificial, augmented, and virtual realities; I.3.7 [Computer Graphics]: Three-Dimensional Graphics and Realism{--}Animation},
abstract = {We propose the first system for live dynamic augmentation of human faces. Using projector-based illumination, we alter the appearance of human performers during novel performances. The key challenge of live augmentation is latency {--} an image is generated according to a specific pose, but is displayed on a different facial configuration by the time it is projected. Therefore, our system aims at reducing latency during every step of the process, from capture, through processing, to projection. Using infrared illumination, an optically and computationally aligned high-speed camera detects facial orientation as well as expression. The estimated expression blendshapes are mapped onto a lower dimensional space, and the facial motion and non-rigid deformation are estimated, smoothed and predicted through adaptive Kalman filtering. Finally, the desired appearance is generated by interpolating precomputed offset textures according to time, global position, and expression. We have evaluated our system through an optimized CPU and GPU prototype, and demonstrated successful low latency augmentation for different performers and performances with varying facial play and motion speed. In contrast to existing methods, the presented system is the first method which fully supports dynamic facial projection mapping without the requirement of any physical tracking markers and incorporates facial expressions.}
}
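The smoothing-and-prediction step can be illustrated with a textbook Kalman filter that, after fusing a measurement, extrapolates the state over the known latency before the image is rendered. The constant-velocity model, matrices and names below are generic placeholders, not the paper's adaptive filter.

import numpy as np

def update_and_predict(x, P, F, Q, H, R, z, latency_steps):
    # Predict the state forward to the measurement time.
    x, P = F @ x, F @ P @ F.T + Q
    # Standard Kalman update: fuse the new camera measurement z.
    S = H @ P @ H.T + R
    K = P @ H.T @ np.linalg.inv(S)
    x = x + K @ (z - H @ x)
    P = (np.eye(len(x)) - K @ H) @ P
    # Extrapolate over the measured end-to-end latency before rendering,
    # so the projected image matches the face at display time.
    for _ in range(latency_steps):
        x = F @ x
    return x, P

# Constant-velocity model for one tracked quantity (value and its rate):
dt = 1.0 / 240.0                          # assumed high-speed camera period
F = np.array([[1.0, dt], [0.0, 1.0]])
H = np.array([[1.0, 0.0]])                # only the value is observed
Q, R = 1e-4 * np.eye(2), np.array([[1e-2]])
x, P = np.zeros(2), np.eye(2)
x, P = update_and_predict(x, P, F, Q, H, R, np.array([0.5]), latency_steps=3)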
As data sets grow to exascale, automated data analysis and visualisation are increasingly important, to intermediate human understanding and to reduce demands on disk storage via in situ analysis. Trends in architecture of high performance computing systems necessitate analysis algorithms to make effective use of combinations of massively multicore and distributed systems. One of the principal analytic tools is the contour tree, which analyses relationships between contours to identify features of more than local importance. Unfortunately, the predominant algorithms for computing the contour tree are explicitly serial, and founded on serial metaphors, which has limited the scalability of this form of analysis. While there is some work on distributed contour tree computation, and separately on hybrid GPU-CPU computation, there is no efficient algorithm with strong formal guarantees on performance allied with fast practical performance. We report the first shared SMP algorithm for fully parallel contour tree computation, with formal guarantees of O(lg n lg t) parallel steps and O(n lg n) work, and implementations with up to 10{$\times$} parallel speed up in OpenMP and up to 50{$\times$} speed up in NVIDIA Thrust.
@misc{wrro106038,
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
year = {2017},
booktitle = {LDAV 2016},
doi = {10.1109/LDAV.2016.7874312},
journal = {6th IEEE Symposium on Large Data Analysis and Visualization},
month = {March},
pages = {75--84},
publisher = {IEEE},
title = {Parallel Peak Pruning for Scalable SMP Contour Tree Computation},
author = {Carr, HA and Weber, GH and Sewell, CM and Ahrens, JP},
url = {https://eprints.whiterose.ac.uk/id/eprint/106038/},
isbn = {978-1-5090-5659-0},
keywords = {topological analysis, contour tree, merge tree, data parallel algorithms},
abstract = {As data sets grow to exascale, automated data analysis and visualisation are increasingly important, to intermediate human understanding and to reduce demands on disk storage via in situ analysis. Trends in architecture of high performance computing systems necessitate analysis algorithms to make effective use of combinations of massively multicore and distributed systems. One of the principal analytic tools is the contour tree, which analyses relationships between contours to identify features of more than local importance. Unfortunately, the predominant algorithms for computing the contour tree are explicitly serial, and founded on serial metaphors, which has limited the scalability of this form of analysis. While there is some work on distributed contour tree computation, and separately on hybrid GPU-CPU computation, there is no efficient algorithm with strong formal guarantees on performance allied with fast practical performance. We report the first shared SMP algorithm for fully parallel contour tree computation, with formal guarantees of O(lg n lg t) parallel steps and O(n lg n) work, and implementations with up to 10{$\times$} parallel speed up in OpenMP and up to 50{$\times$} speed up in NVIDIA Thrust.}
}
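For contrast with the parallel formulation, the Python sketch below is the classic serial union-find sweep that builds an augmented join tree, one half of a contour tree computation. It illustrates the inherently sequential metaphor (a single sweep through sorted vertices) that the peak-pruning approach replaces; the function and variable names are our own.

def join_tree(values, edges):
    # values: dict vertex -> scalar; edges: iterable of (u, v) mesh edges.
    # Returns arcs (u, v) of the augmented join tree, built by sweeping
    # vertices from high to low and merging components with union-find.
    parent = {}

    def find(v):
        while parent[v] != v:
            parent[v] = parent[parent[v]]   # path halving
            v = parent[v]
        return v

    order = sorted(values, key=values.get, reverse=True)
    rank = {v: i for i, v in enumerate(order)}
    nbrs = {v: [] for v in values}
    for u, v in edges:
        nbrs[u].append(v)
        nbrs[v].append(u)

    lowest = {}      # component root -> vertex most recently added to it
    arcs = []
    for v in order:                          # sweep downwards in value
        parent[v] = v
        lowest[v] = v
        for u in nbrs[v]:
            if rank[u] < rank[v]:            # u already swept (higher value)
                ru, rv = find(u), find(v)
                if ru != rv:                 # a component reaches v here
                    arcs.append((lowest[ru], v))
                    parent[ru] = rv
                    lowest[rv] = v
    return arcs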
Achieving minimal latency within augmented reality (AR) systems is one of the most important factors in creating a convincing visual impression. It is even more crucial for non-video augmentations such as dynamic projection mappings, because in that case the superimposed imagery has to exactly match the dynamic real surface, which obviously cannot be directly influenced or delayed in its movement. In those cases, the inevitable latency is usually compensated for using prediction and extrapolation operations, which require accurate information about the overall latency in order to predict the correct time frame for the augmentation. Different strategies have been applied to accurately compute this latency. Since some of these AR systems operate within different spectral bands for input and output, it is not possible to apply latency measurement methods that encode time stamps directly into the presented output images, as these might not be sensed by the input device. We present a generic latency measurement device which can be used to accurately measure the overall end-to-end latency of camera-based AR systems with an accuracy below one millisecond. It comprises an LED-based time stamp generator that displays the time as a Gray code at multiple spatial and spectral locations. It is controlled by a micro-controller and sensed by an external camera that observes the output display and the LED device at the same time.
@misc{wrro179726,
year = {2017},
booktitle = {2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)},
doi = {10.1109/ismar-adjunct.2016.0072},
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
title = {A LED-Based IR/RGB End-to-End Latency Measurement Device},
publisher = {IEEE},
author = {M Billeter and G Rothlin and J Wezel and D Iwai and A Grundhofer},
journal = {2016 IEEE International Symposium on Mixed and Augmented Reality (ISMAR-Adjunct)},
month = {February},
abstract = {Achieving a minimal latency within augmented reality (AR) systems is one of the most important factors in achieving a convincing visual impression. It is even more crucial for non-video augmentations such as dynamic projection mappings, because in that case the superimposed imagery has to exactly match the dynamic real surface, which obviously cannot be directly influenced or delayed in its movement. In those cases, the inevitable latency is usually compensated for using prediction and extrapolation operations, which require accurate information about the overall latency in order to predict to exactly the right time frame for the augmentation. Different strategies have been applied to accurately compute this latency. Since some of these AR systems operate within different spectral bands for input and output, it is not possible to apply latency measurement methods that encode time stamps directly into the presented output images, as these might not be sensed by the input device used. We present a generic latency measurement device which can be used to accurately measure the overall end-to-end latency of camera-based AR systems with an accuracy below one millisecond. It comprises an LED-based time stamp generator displaying the time as a Gray code at multiple spatial and spectral locations. It is controlled by a micro-controller and sensed by an external camera device observing the output display as well as the LED device at the same time.},
keywords = {H.5.2 [HCI]: User Interfaces{--}Benchmarking},
url = {https://eprints.whiterose.ac.uk/id/eprint/179726/}
}
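The device above encodes time as a Gray code so that a camera sampling mid-transition misreads the timestamp by at most one tick. A minimal sketch of the standard binary-reflected encoding and its inverse (counter width and LED layout are not modelled here):

def gray_encode(n):
    # Binary-reflected Gray code: consecutive values differ in exactly one bit.
    return n ^ (n >> 1)

def gray_decode(g):
    # Invert by XOR-folding the shifted code word back together:
    # n = g ^ (g >> 1) ^ (g >> 2) ^ ...
    n = 0
    while g:
        n ^= g
        g >>= 1
    return n

# round-trip check over a 16-bit counter
assert all(gray_decode(gray_encode(t)) == t for t in range(1 << 16))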
Lattice Quantum Chromodynamics (QCD) is an approach used by theoretical physicists to model the strong nuclear force. This works at the sub-nuclear scale to bind quarks together into hadrons including the proton and neutron. One of the long term goals in lattice QCD is to produce a phase diagram of QCD matter as thermodynamic control parameters temperature and baryon chemical potential are varied. The ability to predict critical points in the phase diagram, known as phase transitions, is one of the on-going challenges faced by domain scientists. In this work we consider how multivariate topological visualisation techniques can be applied to simulation data to help domain scientists predict the location of phase transitions. In the process it is intended that applying these techniques to lattice QCD will strengthen the interpretation of output from multivariate topological algorithms, including the joint contour net. Lattice QCD presents an interesting opportunity for using these techniques as it offers a rich array of interacting scalar fields for analysis; however, it also presents unique challenges due to its reliance on quantum mechanics to interpret the data.
@inproceedings{wrro114658,
year = {2017},
booktitle = {Topology-based Methods in Visualization 2017 (TopoInVis 2017)},
title = {Joint Contour Net analysis of lattice QCD data},
month = {February},
url = {https://eprints.whiterose.ac.uk/id/eprint/114658/},
author = {Thomas, DP and Borgo, R and Carr, HA and Hands, S},
abstract = {Lattice Quantum Chromodynamics (QCD) is an approach used by theoretical physicists to model the strong nuclear force. This works at the sub-nuclear scale to bind quarks together into hadrons including the proton and neutron. One of the long term goals in lattice QCD is to produce a phase diagram of QCD matter as thermodynamic control parameters temperature and baryon chemical potential are varied. The ability to predict critical points in the phase diagram, known as phase transitions, is one of the on-going challenges faced by domain scientists. In this work we consider how multivariate topological visualisation techniques can be applied to simulation data to help domain scientists predict the location of phase transitions. In the process it is intended that applying these techniques to lattice QCD will strengthen the interpretation of output from multivariate topological algorithms, including the joint contour net. Lattice QCD presents an interesting opportunity for using these techniques as it offers a rich array of interacting scalar fields for analysis; however, it also presents unique challenges due to its reliance on quantum mechanics to interpret the data.},
keywords = {Computational Topology; Joint Contour Net; Reeb Space}
}
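To illustrate the quantisation underlying the joint contour net used above, here is a minimal grid-based Python sketch: quantise two co-located scalar fields and label the face-connected regions sharing the same joint bin, which correspond to the "joint contour slabs" forming the nodes of the JCN. The published construction works on simplicial meshes and also builds edges between adjacent slabs; function and parameter names here are ours.

import numpy as np
from scipy import ndimage

def joint_contour_slabs(f, g, n_bins=8):
    # Bin each field into n_bins slabs, combine into one joint-bin id per
    # grid point, then label connected components within each joint bin.
    fq = np.digitize(f, np.linspace(f.min(), f.max(), n_bins + 1)[1:-1])
    gq = np.digitize(g, np.linspace(g.min(), g.max(), n_bins + 1)[1:-1])
    joint = fq * n_bins + gq                   # one id per (f-bin, g-bin) pair
    labels, next_label = np.zeros(f.shape, dtype=int), 0
    for jid in np.unique(joint):
        comp, k = ndimage.label(joint == jid)  # face-connected components
        labels[comp > 0] = comp[comp > 0] + next_label
        next_label += k
    return labels                              # slab id per grid point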
This paper presents an efficient algorithm for the computation of the Reeb space of an input bivariate piecewise linear scalar function f defined on a tetrahedral mesh. By extending and generalizing algorithmic concepts from the univariate case to the bivariate one, we report the first practical, output-sensitive algorithm for the exact computation of such a Reeb space. The algorithm starts by identifying the Jacobi set of f, the bivariate analogs of critical points in the univariate case. Next, the Reeb space is computed by segmenting the input mesh along the new notion of Jacobi Fiber Surfaces, the bivariate analog of critical contours in the univariate case. We additionally present a simplification heuristic that enables the progressive coarsening of the Reeb space. Our algorithm is simple to implement and most of its computations can be trivially parallelized. We report performance numbers demonstrating orders of magnitude speedups over previous approaches, enabling for the first time the tractable computation of bivariate Reeb spaces in practice. Moreover, unlike range-based quantization approaches (such as the Joint Contour Net), our algorithm is parameter-free. We demonstrate the utility of our approach by using the Reeb space as a semi-automatic segmentation tool for bivariate data. In particular, we introduce continuous scatterplot peeling, a technique which enables the reduction of the cluttering in the continuous scatterplot, by interactively selecting the features of the Reeb space to project. We provide a VTK-based C++ implementation of our algorithm that can be used for reproduction purposes or for the development of new Reeb space based visualization techniques.
@article{wrro103600,
doi = {10.1109/TVCG.2016.2599017},
year = {2017},
number = {1},
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
title = {Jacobi Fiber Surfaces for Bivariate Reeb Space Computation},
volume = {23},
month = {January},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {960--969},
author = {Tierny, J and Carr, HA},
url = {https://dx.doi.org/10.1109/TVCG.2016.2599017},
issn = {1077-2626},
keywords = {Topological data analysis, multivariate data, data segmentation},
abstract = {This paper presents an efficient algorithm for the computation of the Reeb space of an input bivariate piecewise linear scalar function f defined on a tetrahedral mesh. By extending and generalizing algorithmic concepts from the univariate case to the bivariate one, we report the first practical, output-sensitive algorithm for the exact computation of such a Reeb space. The algorithm starts by identifying the Jacobi set of f, the bivariate analogs of critical points in the univariate case. Next, the Reeb space is computed by segmenting the input mesh along the new notion of Jacobi Fiber Surfaces, the bivariate analog of critical contours in the univariate case. We additionally present a simplification heuristic that enables the progressive coarsening of the Reeb space. Our algorithm is simple to implement and most of its computations can be trivially parallelized. We report performance numbers demonstrating orders of magnitude speedups over previous approaches, enabling for the first time the tractable computation of bivariate Reeb spaces in practice. Moreover, unlike range-based quantization approaches (such as the Joint Contour Net), our algorithm is parameter-free. We demonstrate the utility of our approach by using the Reeb space as a semi-automatic segmentation tool for bivariate data. In particular, we introduce continuous scatterplot peeling, a technique which enables the reduction of the cluttering in the continuous scatterplot, by interactively selecting the features of the Reeb space to project. We provide a VTK-based C++ implementation of our algorithm that can be used for reproduction purposes or for the development of new Reeb space based visualization techniques.}
}
Multifield data are common in visualization. However, reducing these data to comprehensible geometry is a challenging problem. Fiber surfaces, an analogy of isosurfaces to bivariate volume data, are a promising new mechanism for understanding multifield volumes. In this work, we explore direct ray casting of fiber surfaces from volume data without any explicit geometry extraction. We sample directly along rays in domain space, and perform geometric tests in range space where fibers are defined, using a signed distance field derived from the control polygons. Our method requires little preprocess, and enables real-time exploration of data, dynamic modification and pixel-exact rendering of fiber surfaces, and support for higher-order interpolation in domain space. We demonstrate this approach on several bivariate datasets, including analysis of multi-field combustion data.
@article{wrro103601,
year = {2017},
doi = {10.1109/TVCG.2016.2599040},
number = {1},
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
title = {Direct Multifield Volume Ray Casting of Fiber Surfaces},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
pages = {941--949},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {January},
volume = {23},
issn = {1077-2626},
keywords = {Multidimensional Data, Volume Rendering, Isosurface; Isosurfaces, Rendering (computer graphics), Casting, Power capacitors, Aerospace electronics, Acceleration, Transfer functions},
abstract = {Multifield data are common in visualization. However, reducing these data to comprehensible geometry is a challenging problem. Fiber surfaces, an analogy of isosurfaces to bivariate volume data, are a promising new mechanism for understanding multifield volumes. In this work, we explore direct ray casting of fiber surfaces from volume data without any explicit geometry extraction. We sample directly along rays in domain space, and perform geometric tests in range space where fibers are defined, using a signed distance field derived from the control polygons. Our method requires little preprocess, and enables real-time exploration of data, dynamic modification and pixel-exact rendering of fiber surfaces, and support for higher-order interpolation in domain space. We demonstrate this approach on several bivariate datasets, including analysis of multi-field combustion data.},
author = {Wu, K and Knoll, A and Isaac, BJ and Carr, HA and Pascucci, V},
url = {https://dx.doi.org/10.1109/TVCG.2016.2599040}
}
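The range-space geometric test described above can be illustrated with a minimal signed-distance function for a closed control polygon. This is a sketch of the general idea only, not the paper's precomputed distance-field implementation:

import numpy as np

def signed_distance_to_polygon(p, poly):
    # Distance from 2D range-space point p to a closed polygon,
    # negative inside (even-odd rule).
    p = np.asarray(p, float)
    d, inside, n = np.inf, False, len(poly)
    for i in range(n):
        a = np.asarray(poly[i], float)
        b = np.asarray(poly[(i + 1) % n], float)
        ab = b - a
        t = np.clip(np.dot(p - a, ab) / np.dot(ab, ab), 0.0, 1.0)
        d = min(d, float(np.linalg.norm(p - (a + t * ab))))
        if (a[1] > p[1]) != (b[1] > p[1]):     # edge crosses the horizontal ray
            x = a[0] + (p[1] - a[1]) * (b[0] - a[0]) / (b[1] - a[1])
            if p[0] < x:
                inside = not inside
    return -d if inside else d

# the centre of the unit square lies 0.5 inside:
print(signed_distance_to_polygon((0.5, 0.5), [(0, 0), (1, 0), (1, 1), (0, 1)]))

Along each ray, a renderer would evaluate this function on the sampled bivariate values (f, g) and refine a fiber surface hit wherever the sign changes between successive samples.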
Electronic Health Records (EHRs) are an important asset for clinical research and decision making, but the utility of EHR data depends on its quality. In health, quality is typically investigated by using statistical methods to profile data. To complement established methods, we developed a web-based visualisation tool called MonAT Web Application (MonAT) for profiling the completeness and correctness of EHR. The tool was evaluated by four researchers using anthropometric data from the Born in Bradford Project (BiB Project), and this highlighted three advantages. The first was to understand how missingness varied across variables, and especially to do this for subsets of records. The second was to investigate whether certain variables for groups of records were sufficiently complete to be used in subsequent analysis. The third was to portray longitudinally the records for a given person, to improve outlier identification.
@misc{wrro110718,
publisher = {SCITEPRESS},
title = {MonAT: a Visual Web-based Tool to Profile Health Data Quality},
volume = {5},
journal = {Proceedings of the 10th International Joint Conference on Biomedical Engineering Systems and Technologies (BIOSTEC 2017)},
pages = {26--34},
editor = {A Fred and EL Van den Broek and H Gamboa and M Vaz},
doi = {10.5220/0006114200260034},
booktitle = {10th International Conference on Health Informatics (HEALTHINF 2017)},
year = {2017},
abstract = {Electronic Health Records (EHRs) are an important asset for clinical research and decision making, but the utility of EHR data depends on its quality. In health, quality is typically investigated by using statistical methods to profile data. To complement established methods, we developed a web-based visualisation tool called MonAT Web Application (MonAT) for profiling the completeness and correctness of EHR. The tool was evaluated by four researchers using anthropometric data from the Born in Bradford Project (BiB Project), and this highlighted three advantages. The first was to understand how missingness varied across variables, and especially to do this for subsets of records. The second was to investigate whether certain variables for groups of records were sufficiently complete to be used in subsequent analysis. The third was to portray longitudinally the records for a given person, to improve outlier identification.},
keywords = {Data Quality, Visualization, Health Data, Longitudinal Data},
isbn = {978-989-758-213-4},
url = {https://eprints.whiterose.ac.uk/id/eprint/110718/},
author = {Noselli, M and Mason, D and Mohammed, MA and Ruddle, RA}
}
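As a rough illustration of the completeness profiling described above, a few lines of pandas reproduce the underlying numbers, though not the visual presentation that is MonAT's contribution. The frame and column names below are hypothetical:

import pandas as pd

def completeness_profile(df, by=None):
    # Per-variable completeness (fraction of non-missing values),
    # overall or per subgroup of records.
    if by is None:
        return df.notna().mean()
    return df.drop(columns=[by]).notna().groupby(df[by]).mean()

# e.g. completeness of anthropometric variables per data-collection wave
# (names hypothetical): completeness_profile(records, by="wave")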
Topological simplification of scalar and vector fields is well-established as an effective method for analysing and visualising complex data sets. For multivariate (alternatively, multi-field) data, topological analysis requires simultaneous advances both mathematically and computationally. We propose a robust multivariate topology simplification method based on "lip"-pruning from the Reeb space. Mathematically, we show that the projection of the Jacobi set of multivariate data into the Reeb space produces a Jacobi structure that separates the Reeb space into simple components. We also show that the dual graph of these components gives rise to a Reeb skeleton that has properties similar to the scalar contour tree and Reeb graph, for topologically simple domains. We then introduce a range measure to give a scaling-invariant total ordering of the components or features that can be used for simplification. Computationally, we show how to compute Jacobi structure, Reeb skeleton, range and geometric measures in the Joint Contour Net (an approximation of the Reeb space) and that these can be used for visualisation similar to the contour tree or Reeb graph.
@article{wrro100068,
doi = {10.1016/j.comgeo.2016.05.006},
year = {2016},
note = {{\copyright} 2016 Elsevier B.V. This is an author produced version of a paper published in Computational Geometry. Uploaded in accordance with the publisher's self-archiving policy.},
publisher = {Elsevier},
title = {Multivariate Topology Simplification},
volume = {58},
month = {October},
journal = {Computational Geometry},
pages = {1--24},
url = {http://dx.doi.org/10.1016/j.comgeo.2016.05.006},
author = {Chattopadhyay, A and Carr, H and Duke, D and Geng, Z and Saeki, O},
abstract = {Topological simplification of scalar and vector fields is well-established as an effective method for analysing and visualising complex data sets. For multivariate (alternatively, multi-field) data, topological analysis requires simultaneous advances both mathematically and computationally. We propose a robust multivariate topology simplification method based on "lip"-pruning from the Reeb space. Mathematically, we show that the projection of the Jacobi set of multivariate data into the Reeb space produces a Jacobi structure that separates the Reeb space into simple components. We also show that the dual graph of these components gives rise to a Reeb skeleton that has properties similar to the scalar contour tree and Reeb graph, for topologically simple domains. We then introduce a range measure to give a scaling-invariant total ordering of the components or features that can be used for simplification. Computationally, we show how to compute Jacobi structure, Reeb skeleton, range and geometric measures in the Joint Contour Net (an approximation of the Reeb space) and that these can be used for visualisation similar to the contour tree or Reeb graph.},
keywords = {Simplification; Multivariate topology; Reeb space; Reeb skeleton; Multi-dimensional Reeb graph},
issn = {0925-7721}
}
Seismic data visualisation and analysis is an area of research interest for many commercial and academic disciplines. It enables geoscientists to understand structures underneath the earth, and is an important step in building subsurface geological models to identify hydrocarbon reservoirs and run geological simulations. Good quality watertight surface meshes are required for constructing these models for accurate identification and extraction of strata/horizons that contain carbon deposits such as fuel and gas. This research demonstrates extracting watertight geometric surfaces from 3D seismic volumes to improve horizon identification and extraction. Isosurfaces and fiber surfaces are proposed for extracting horizons from seismic data. Initial tests with isosurfaces have been conducted, and further experiments using fiber surfaces are underway as the next direction, discussed in sections 4.5 and 4.6.
@misc{wrro106638,
title = {Generating Watertight Isosurfaces from 3D Seismic Data},
year = {2016},
doi = {10.2312/cgvc.20162020},
publisher = {Eurographics Association for Computer Graphics},
booktitle = {Computer Graphics \& Visual Computing (CGVC) 2016},
month = {September},
journal = {Computer Graphics \& Visual Computing (CGVC) 2016},
isbn = {978-3-03868-022-2},
url = {http://doi.org/10.2312/cgvc.20162020},
author = {Khan, MS and Carr, H and Angus, D},
abstract = {Seismic data visualisation and analysis is an area of research interest for many commercial and academic disciplines. It enables geoscientists to understand structures underneath the earth, and is an important step in building subsurface geological models to identify hydrocarbon reservoirs and run geological simulations. Good quality watertight surface meshes are required for constructing these models for accurate identification and extraction of strata/horizons that contain carbon deposits such as fuel and gas. This research demonstrates extracting watertight geometric surfaces from 3D seismic volumes to improve horizon identification and extraction. Isosurfaces and fiber surfaces are proposed for extracting horizons from seismic data. Initial tests with isosurfaces have been conducted, and further experiments using fiber surfaces are underway as the next direction, discussed in sections 4.5 and 4.6.},
keywords = {Computer Graphics, Volume Visualisation, Isosurfaces, Watertight Meshes, Seismic Volumes, Seismic Horizon, Surface Handles}
}
As data sets increase in size beyond the petabyte, it is increasingly important to have automated methods for data analysis and visualisation. While topological analysis tools such as the contour tree and Morse-Smale complex are now well established, there is still a shortage of efficient parallel algorithms for their computation, in particular for massively data-parallel computation on a SIMD model. We report the first data-parallel algorithm for computing the fully augmented contour tree, using a quantised computation model. We then extend this to provide a hybrid data-parallel / distributed algorithm allowing scaling beyond a single GPU or CPU, and provide results for its computation. Our implementation uses the portable data-parallel primitives provided by NVIDIA's Thrust library, allowing us to compile our same code for both GPUs and multi-core CPUs.
@misc{wrro107190,
month = {September},
journal = {Computer Graphics \& Visual Computing},
title = {Hybrid Data-Parallel Contour Tree Computation},
publisher = {The Eurographics Association},
editor = {C Turkay and TR Wan},
doi = {10.2312/cgvc.20161299},
booktitle = {CGVC 2016},
year = {2016},
author = {Carr, H and Sewell, C and Lo, L-T and Ahrens, J},
isbn = {978-3-03868-022-2},
url = {https://doi.org/10.2312/cgvc.20161299},
keywords = {topological analysis, contour tree, merge tree, data parallel algorithms},
abstract = {As data sets increase in size beyond the petabyte, it is increasingly important to have automated methods for data analysis and visualisation. While topological analysis tools such as the contour tree and Morse-Smale complex are now well established, there is still a shortage of efficient parallel algorithms for their computation, in particular for massively data-parallel computation on a SIMD model. We report the first data-parallel algorithm for computing the fully augmented contour tree, using a quantised computation model. We then extend this to provide a hybrid data-parallel / distributed algorithm allowing scaling beyond a single GPU or CPU, and provide results for its computation. Our implementation uses the portable data-parallel primitives provided by NVIDIA's Thrust library, allowing us to compile our same code for both GPUs and multi-core CPUs.}
}
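A core SIMD-friendly primitive in data-parallel tree computations of this kind is pointer doubling, sketched below in NumPy as an illustrative stand-in for the paper's Thrust-based data-parallel primitives:

import numpy as np

def pointer_double(parent):
    # Replace each vertex's parent with its grandparent until every vertex
    # points at its component root; each pass is one vectorised gather and
    # the whole loop converges in O(lg n) passes.
    parent = np.asarray(parent).copy()
    while True:
        grand = parent[parent]            # one parallel gather per pass
        if np.array_equal(grand, parent):
            return parent
        parent = grand

# a chain 0 <- 1 <- 2 <- 3 resolves to root 0:
print(pointer_double([0, 0, 1, 2]))       # -> [0 0 0 0]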
The present paper asks how visualization can help data scientists make sense of event sequences, and makes three main contributions. The first is a research agenda, which we divide into methods for presentation, interaction & computation, and scale-up. Second, we introduce the concept of Event Maps to help with scale-up, and illustrate coarse-, medium- and fine-grained Event Maps with electronic health record (EHR) data for prostate cancer. Third, in an experiment we investigated participants' ability to judge the similarity of event sequences. Contrary to previous research into categorical data, color and shape were better than position for encoding event type. However, even with simple sequences (5 events of 3 types in the target sequence), participants only got 88% correct despite averaging 7.4 seconds to respond. This indicates that simple visualization techniques are not effective.
@misc{wrro106008,
booktitle = {The Event Event: Temporal \& Sequential Event Analysis - An IEEE VIS 2016 Workshop},
year = {2016},
title = {Methods and a research agenda for the evaluation of event sequence visualization techniques},
journal = {Proceedings of the IEEE VIS 2016 Workshop on Temporal \& Sequential Event Analysis.},
month = {September},
note = {This is an author produced version of a conference paper accepted by The Event Event: Temporal \& Sequential Event Analysis - An IEEE VIS 2016 Workshop, available online at http://eventevent.github.io/papers/EVENT\_2016\_paper\_9.pdf.},
abstract = {The present paper asks how visualization can help data scientists make sense of event sequences, and makes three main contributions. The first is a research agenda, which we divide into methods for presentation, interaction \& computation, and scale-up. Second, we introduce the concept of Event Maps to help with scale-up, and illustrate coarse-, medium- and fine-grained Event Maps with electronic health record (EHR) data for prostate cancer. Third, in an experiment we investigated participants' ability to judge the similarity of event sequences. Contrary to previous research into categorical data, color and shape were better than position for encoding event type. However, even with simple sequences (5 events of 3 types in the target sequence), participants only got 88\% correct despite averaging 7.4 seconds to respond. This indicates that simple visualization techniques are not effective.},
keywords = {Visualization; Electronic Health Records; Event Sequences; Research agenda; Evaluation},
url = {http://eventevent.github.io/papers/EVENT\%5f2016\%5fpaper\%5f9.pdf},
author = {Ruddle, RA and Bernard, J and May, T and L{\"u}cke-Tieke, H and Kohlhammer, J}
}
Interactive visualization plays a key role in the analysis of large datasets. It can help users to explore data, investigate hypotheses and find patterns. The easier and more tangible the interaction, the more likely it is to enhance understanding. This paper presents a tabletop Tangible User Interface (TUI) for interactive data visualization and offers two main contributions. First, we highlight the functional requirements for a data visualization interface and present a tabletop TUI that combines tangible objects with multi-touch interaction. Second, we compare the performance of the tabletop TUI and a multi-touch interface. The results show that participants found patterns faster with the TUI. This was due to the fact that they adopted a more effective strategy using the tabletop TUI than the multi-touch interface.
@misc{wrro92246,
note = {{\copyright} 2016 ACM. This is the author's version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in Proceedings of the TEI '16: Tenth International Conference on Tangible, Embedded, and Embodied Interaction, 2016 http://doi.acm.org/10.1145/2839462.2839464.},
booktitle = {10th International Conference on Tangible, Embedded and Embodied Interaction},
doi = {10.1145/2839462.2839464},
year = {2016},
pages = {279--286},
month = {February},
journal = {Proceedings of the TEI '16},
title = {Comparing Tangible and Multi-touch Interaction for Interactive Data Visualization Tasks},
publisher = {ACM},
keywords = {Tangible User Interface; tabletop display; visualization; tangible interaction; biological data; multi-touch},
abstract = {Interactive visualization plays a key role in the analysis of large datasets. It can help users to explore data, investigate hypotheses and find patterns. The easier and more tangible the interaction, the more likely it is to enhance understanding. This paper presents a tabletop Tangible User Interface (TUI) for interactive data visualization and offers two main contributions. First, we highlight the functional requirements for a data visualization interface and present a tabletop TUI that combines tangible objects with multi-touch interaction. Second, we compare the performance of the tabletop TUI and a multi-touch interface. The results show that participants found patterns faster with the TUI. This was due to the fact that they adopted a more effective strategy using the tabletop TUI than the multi-touch interface.},
author = {Al-Megren, S and Ruddle, RA},
isbn = {978-1-4503-3582-9},
url = {http://dx.doi.org/10.1145/2839462.2839464}
}
This paper describes the design and evaluation of two generations of an interface for navigating datasets of gigapixel images that pathologists use to diagnose cancer. The interface design is innovative because users panned with an overview:detail view scale difference that was up to 57 times larger than established guidelines, and 1 million pixel 'thumbnail' overviews that leveraged the real-estate of high resolution workstation displays. The research involved experts performing real work (pathologists diagnosing cancer), using datasets that were up to 3150 times larger than those used in previous studies that involved navigating images. The evaluation provides evidence about the effectiveness of the interfaces, and characterizes how experts navigate gigapixel images when performing real work. Similar interfaces could be adopted in applications that use other types of high-resolution images (e.g., remote sensing or high-throughput microscopy).
@article{wrro91558,
volume = {23},
journal = {ACM Transactions on Computer-Human Interaction},
month = {February},
title = {The design and evaluation of interfaces for navigating gigapixel images in digital pathology},
publisher = {Association for Computing Machinery (ACM)},
number = {1},
note = {{\copyright} ACM, 2016. This is the author's version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in ACM Transactions on Computer-Human Interaction, 23 (1), February 2016. http://doi.acm.org/10.1145/2834117.},
doi = {10.1145/2834117},
year = {2016},
abstract = {This paper describes the design and evaluation of two generations of an interface for navigating datasets of gigapixel images that pathologists use to diagnose cancer. The interface design is innovative because users panned with an overview:detail view scale difference that was up to 57 times larger than established guidelines, and 1 million pixel 'thumbnail' overviews that leveraged the real-estate of high resolution workstation displays. The research involved experts performing real work (pathologists diagnosing cancer), using datasets that were up to 3150 times larger than those used in previous studies that involved navigating images. The evaluation provides evidence about the effectiveness of the interfaces, and characterizes how experts navigate gigapixel images when performing real work. Similar interfaces could be adopted in applications that use other types of high-resolution images (e.g., remote sensing or high-throughput microscopy).},
issn = {1073-0516},
keywords = {Human-centered computing - Empirical studies in HCI; Humancentered computing - Interaction design theory, concepts and paradigms; Human-centered computing - Visualization systems and tools; Gigapixel images, navigation, pathology, overview+detail, zoomable user interface},
url = {http://dx.doi.org/10.1145/2834117},
author = {Ruddle, RA and Thomas, RG and Randell, R and Quirke, P and Treanor, D}
}
Scalar topology in the form of Morse theory has provided computational tools that analyze and visualize data from scientific and engineering tasks. Contracting isocontours to single points encapsulates variations in isocontour connectivity in the Reeb graph. For multivariate data, isocontours generalize to fibers: inverse images of points in the range, and this area is therefore known as fiber topology. However, fiber topology is less fully developed than Morse theory, and current efforts rely on manual visualizations. This paper presents how to accelerate and semi-automate this task through an interface for visualizing fiber singularities of multivariate functions R3 → R2. This interface exploits existing conventions of fiber topology, but also introduces a 3D view based on the extension of Reeb graphs to Reeb spaces. Using the Joint Contour Net, a quantized approximation of the Reeb space, this accelerates topological visualization and permits online perturbation to reduce or remove degeneracies in functions under study. Validation of the interface is performed by assessing whether the interface supports the mathematical workflow both of experts and of less experienced mathematicians.
@article{wrro88921,
volume = {22},
month = {January},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {945--954},
publisher = {Institute of Electrical and Electronics Engineers},
title = {Interactive Visualization for Singular Fibers of Functions f : R3 {$\rightarrow$} R2},
note = {{\copyright} 2015, IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
number = {1},
doi = {10.1109/TVCG.2015.2467433},
year = {2016},
issn = {1077-2626},
keywords = {singular fibers; fiber topology; mathematical visualization; design study},
abstract = {Scalar topology in the form of Morse theory has provided computational tools that analyze and visualize data from scientific and engineering tasks. Contracting isocontours to single points encapsulates variations in isocontour connectivity in the Reeb graph. For multivariate data, isocontours generalize to fibers: inverse images of points in the range, and this area is therefore known as fiber topology. However, fiber topology is less fully developed than Morse theory, and current efforts rely on manual visualizations.
This paper presents how to accelerate and semi-automate this task through an interface for visualizing fiber singularities of multivariate functions R3 {$\rightarrow$} R2. This interface exploits existing conventions of fiber topology, but also introduces a 3D view based on the extension of Reeb graphs to Reeb spaces. Using the Joint Contour Net, a quantized approximation of the Reeb space, this accelerates topological visualization and permits online perturbation to reduce or remove degeneracies in functions under study. Validation of the interface is performed by assessing whether the interface supports the mathematical workflow both of experts and of less experienced mathematicians.},
author = {Sakurai, D and Saeki, O and Carr, H and Wu, H-Y and Yamamoto, T and Duke, D and Takahashi, S},
url = {http://dx.doi.org/10.1109/TVCG.2015.2467433}
}
@article{wrro123473,
month = {October},
journal = {Journal of Pathology Informatics},
volume = {6},
title = {Response to Rojo and Bueno: "Analysis of the impact of high resolution monitors in digital pathology"},
publisher = {Medknow Publications},
number = {1},
note = {{\copyright} 2015 Journal of Pathology Informatics. This is an open access article distributed under the terms of the Creative Commons Attribution-NonCommercial-ShareAlike 3.0 License, which allows others to remix, tweak, and build upon the work non-commercially, as long as the author is credited and the new creations are licensed under the identical terms.},
year = {2015},
doi = {10.4103/2153-3539.168522},
issn = {2229-5089},
author = {Randell, R and Ruddle, RA and Thomas, RG and Treanor, D},
url = {https://eprints.whiterose.ac.uk/id/eprint/123473/}
}
Background: Biomedical image processing methods require users to optimise input parameters to ensure high-quality output. This presents two challenges. First, it is difficult to optimise multiple input parameters for multiple input images. Second, it is difficult to achieve an understanding of underlying algorithms, in particular, relationships between input and output. Results: We present a visualisation method that transforms users' ability to understand algorithm behaviour by integrating input and output and supporting exploration of their relationships. We discuss its application to a colour deconvolution technique for stained histology images and show how it enabled a domain expert to identify suitable parameter values for the deconvolution of two types of images, and metrics to quantify deconvolution performance. It also enabled a breakthrough in understanding by invalidating an underlying assumption about the algorithm. Conclusions: The visualisation method presented here provides users with a capability to combine multiple inputs and outputs in biomedical image processing that is not provided by previous analysis software. The analysis supported by our method is not feasible with conventional trial-and-error approaches.
@article{wrro86634,
number = {S11},
note = {{\copyright} 2015 Pretorius et al; licensee BioMed Central Ltd. This is an Open Access article distributed under the terms of the Creative Commons Attribution License (http://creativecommons.org/licenses/by/2.0), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work is properly cited},
year = {2015},
doi = {10.1186/1471-2105-16-S11-S9},
month = {August},
journal = {BMC Bioinformatics},
volume = {16},
title = {Visual parameter optimisation for biomedical image processing},
publisher = {BioMed Central},
issn = {1471-2105},
keywords = {visualisation; parameter optimisation; image analysis; image processing; biology; biomedicine; histology; design study},
abstract = {Background: Biomedical image processing methods require users to optimise input parameters to ensure high-quality output. This presents two challenges. First, it is difficult to optimise multiple input parameters for multiple input images. Second, it is difficult to achieve an understanding of underlying algorithms, in particular, relationships between input and output.
Results: We present a visualisation method that transforms users' ability to understand algorithm behaviour by integrating input and output and supporting exploration of their relationships. We discuss its application to a colour deconvolution technique for stained histology images and show how it enabled a domain expert to identify suitable parameter values for the deconvolution of two types of images, and metrics to quantify deconvolution performance. It also enabled a breakthrough in understanding by invalidating an underlying assumption about the algorithm.
Conclusions: The visualisation method presented here provides users with a capability to combine multiple inputs and outputs in biomedical image processing that is not provided by previous analysis software. The analysis supported by our method is not feasible with conventional trial-and-error approaches.},
author = {Pretorius, AJ and Zhou, Y and Ruddle, RA},
url = {http://dx.doi.org/10.1186/1471-2105-16-S11-S9}
}
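The colour deconvolution technique to which the paper's visualisation method was applied follows the classic optical-density unmixing scheme; a minimal NumPy sketch is below. The stain vectors shown are nominal haematoxylin/eosin values (illustrative only), and such vectors are exactly the kind of input parameters the method helps users optimise:

import numpy as np

def colour_deconvolve(rgb, stains):
    # rgb: H x W x 3 floats in (0, 1]; stains: k x 3 unit optical-density vectors.
    od = -np.log10(np.maximum(rgb, 1e-6))        # Beer-Lambert optical density
    conc = od.reshape(-1, 3) @ np.linalg.pinv(stains)
    return conc.reshape(rgb.shape[:2] + (-1,))   # per-pixel stain concentrations

# nominal haematoxylin and eosin vectors (values illustrative):
he = np.array([[0.65, 0.70, 0.29],
               [0.07, 0.99, 0.11]])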
High-resolution, wall-size displays often rely on bespoke software for performing interactive data visualisation, leading to interface designs with little or no consistency between displays. This makes adoption for novice users difficult when migrating from desktop environments. However, desktop interface techniques (such as task- and menu-bars) do not scale well and so cannot be relied on to drive the design of large display interfaces. In this paper we present HiReD, a multi-window environment for cluster-driven displays. As well as describing the technical details of the system, we also describe a suite of low-precision interface techniques that aim to provide a familiar desktop environment to the user while overcoming the scalability issues of high-resolution displays. We hope that these techniques, as well as the implementation of HiReD itself, can encourage good practice in the design and development of future interfaces for high-resolution, wall-size displays.
@misc{wrro91514,
publisher = {Association for Computing Machinery},
title = {HiReD: a high-resolution multi-window visualisation environment for cluster-driven displays},
month = {August},
journal = {EICS '15 Proceedings of the 7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
pages = {2--11},
booktitle = {7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems},
doi = {10.1145/2774225.2774850},
year = {2015},
note = {{\copyright} ACM, 2015. This is the author's version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in EICS '15 Proceedings of the 7th ACM SIGCHI Symposium on Engineering Interactive Computing Systems (23 Jul 2015) http://dx.doi.org/10.1145/2774225.2774850.},
url = {http://dx.doi.org/10.1145/2774225.2774850},
isbn = {978-1-4503-3646-8},
author = {Rooney, C and Ruddle, RA},
abstract = {High-resolution, wall-size displays often rely on bespoke software for performing interactive data visualisation, leading to interface designs with little or no consistency between displays. This makes adoption for novice users difficult when migrating from desktop environments. However, desktop interface techniques (such as task- and menu-bars) do not scale well and so cannot be relied on to drive the design of large display interfaces. In this paper we present HiReD, a multi-window environment for cluster-driven displays. As well as describing the technical details of the system, we also describe a suite of low-precision interface techniques that aim to provide a familiar desktop environment to the user while overcoming the scalability issues of high-resolution displays. We hope that these techniques, as well as the implementation of HiReD itself, can encourage good practice in the design and development of future interfaces for high-resolution, wall-size displays.},
keywords = {Powerwall; multi-window environment; user interface; high-resolution; low-precision; H.5.2.; user interfaces; windowing systems}
}
Scientific visualization has many effective methods for examining and exploring scalar and vector fields, but rather fewer for bivariate fields. We report the first general purpose approach for the interactive extraction of geometric separating surfaces in bivariate fields. This method is based on fiber surfaces: surfaces constructed from sets of fibers, the multivariate analogues of isolines. We show simple methods for fiber surface definition and extraction. In particular, we show a simple and efficient fiber surface extraction algorithm based on Marching Cubes. We also show how to construct fiber surfaces interactively with geometric primitives in the range of the function. We then extend this to build user interfaces that generate parameterized families of fiber surfaces with respect to arbitrary polygons. In the special case of isovalue-gradient plots, fiber surfaces capture features geometrically for quantitative analysis that have previously only been analysed visually and qualitatively using multi-dimensional transfer functions in volume rendering. We also demonstrate fiber surface extraction on a variety of bivariate data.
@article{wrro86871,
year = {2015},
doi = {10.1111/cgf.12636},
note = {{\copyright} 2015 The Author(s) Computer Graphics Forum {\copyright} 2015 The Eurographics Association and John Wiley \& Sons Ltd. Published by John Wiley \& Sons Ltd. This is the peer reviewed version of the following article: Carr, H., Geng, Z., Tierny, J., Chattopadhyay, A. and Knoll, A. (2015), Fiber Surfaces: Generalizing Isosurfaces to Bivariate Data. Computer Graphics Forum, 34: 241-250. doi: 10.1111/cgf.12636, which has been published in final form at http://dx.doi.org/10.1111/cgf.12636. This article may be used for non-commercial purposes in accordance with Wiley Terms and Conditions for Self-Archiving.},
number = {3},
publisher = {Wiley},
title = {Fiber surfaces: generalizing isosurfaces to bivariate data},
journal = {Computer Graphics Forum},
month = {June},
volume = {34},
pages = {241--250},
abstract = {Scientific visualization has many effective methods for examining and exploring scalar and vector fields, but rather fewer for bivariate fields. We report the first general purpose approach for the interactive extraction of geometric separating surfaces in bivariate fields. This method is based on fiber surfaces: surfaces constructed from sets of fibers, the multivariate analogues of isolines. We show simple methods for fiber surface definition and extraction. In particular, we show a simple and efficient fiber surface extraction algorithm based on Marching Cubes. We also show how to construct fiber surfaces interactively with geometric primitives in the range of the function. We then extend this to build user interfaces that generate parameterized families of fiber surfaces with respect to arbitrary polygons. In the special case of isovalue-gradient plots, fiber surfaces capture features geometrically for quantitative analysis that have previously only been analysed visually and qualitatively using multi-dimensional transfer functions in volume rendering. We also demonstrate fiber surface extraction on a variety of bivariate data.},
issn = {0167-7055},
url = {http://dx.doi.org/10.1111/cgf.12636},
author = {Carr, HA and Geng, Z and Tierny, J and Chattopadhyay, A and Knoll, A}
}
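The Marching Cubes-based extraction mentioned above reduces to contouring a derived scalar field: map each voxel's bivariate value to its signed distance from the control polygon in range space, then extract the zero level set. A minimal sketch under that reading (the per-voxel Python loop and the reuse of the ray-casting entry's signed-distance test are our simplifications; interpolating the distance per vertex only approximates the exact surface):

import numpy as np
from skimage import measure

def fiber_surface(f, g, polygon, signed_distance):
    # f, g: co-located 3D scalar fields; polygon: control polygon in range
    # space; signed_distance: e.g. the test sketched for the ray-casting
    # entry above. Contour the derived distance field at zero.
    dist = np.empty(f.shape)
    for idx in np.ndindex(f.shape):
        dist[idx] = signed_distance((f[idx], g[idx]), polygon)
    verts, faces, _, _ = measure.marching_cubes(dist, level=0.0)
    return verts, faces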
Airborne Laser Scanning (ALS) was introduced to provide rapid, high resolution scans of landforms for computational processing. More recently, ALS has been adapted for scanning urban areas. The greater complexity of urban scenes necessitates the development of novel methods to exploit urban ALS to best advantage. This paper presents occlusion images: a novel technique that exploits the geometric complexity of the urban environment to improve visualisation of small details for better feature recognition. The algorithm is based on an inversion of traditional occlusion techniques.
@article{wrro97575,
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
month = {June},
volume = {104},
pages = {77--87},
publisher = {Elsevier},
title = {Visualisation of urban airborne laser scanning data with occlusion images},
note = {{\copyright} 2015 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS). Published by Elsevier B.V. This is an author produced version of a paper published in ISPRS Journal of Photogrammetry and Remote Sensing. Uploaded in accordance with the publisher's self-archiving policy.},
year = {2015},
doi = {10.1016/j.isprsjprs.2015.01.014},
keywords = {Airborne laser scanning; LiDAR; Ambient occlusion; Urban modelling; Elevation image; Visualisation},
issn = {0924-2716},
abstract = {Airborne Laser Scanning (ALS) was introduced to provide rapid, high resolution scans of landforms for computational processing. More recently, ALS has been adapted for scanning urban areas. The greater complexity of urban scenes necessitates the development of novel methods to exploit urban ALS to best advantage. This paper presents occlusion images: a novel technique that exploits the geometric complexity of the urban environment to improve visualisation of small details for better feature recognition. The algorithm is based on an inversion of traditional occlusion techniques.},
author = {Hinks, T and Carr, H and Gharibi, H and Laefer, DF},
url = {https://dx.doi.org/10.1016/j.isprsjprs.2015.01.014}
}
Cell lineages describe the developmental history of cell populations and are produced by combining time-lapse imaging and image processing. Biomedical researchers study cell lineages to understand fundamental processes, such as cell differentiation and the pharmacodynamic action of anticancer agents. Yet, the interpretation of cell lineages is hindered by their complexity and insufficient capacity for visual analysis. We present a novel approach for interactive visualisation of cell lineages. Based on an understanding of cellular biology and live-cell imaging methodology, we identify three requirements: multimodality (cell lineages combine spatial, temporal, and other properties), symmetry (related to lineage branching structure), and synchrony (related to temporal alignment of cellular events). We address these by combining visual summaries of the spatiotemporal behaviour of an arbitrary number of lineages, including variation from average behaviour, with node-link representations that emphasise the presence or absence of symmetry and synchrony. We illustrate the merit of our approach by presenting a real-world case study where the cytotoxic action of the anticancer drug topotecan was determined.
@article{wrro84341,
pages = {21--30},
journal = {Computer Graphics Forum},
month = {June},
volume = {34},
title = {Cell lineage visualisation},
publisher = {Wiley},
note = {(c) 2015 The Author(s). Computer Graphics Forum (c) 2015 The Eurographics Association and John Wiley \& Sons Ltd. Published by John Wiley \& Sons Ltd. This is the peer reviewed version of the following article: [Pretorius, AJ, Khan, IA and Errington, RJ (2015) Cell lineage visualisation. Computer Graphics Forum, 34 (3). 21 - 30. ISSN 0167-7055, which has been published in final form at http://dx.doi.org/10.1111/cgf.12614. This article may be used for non-commercial purposes in accordance with Wiley Terms and Conditions for Self-Archiving.},
number = {3},
year = {2015},
booktitle = {Eurographics Conference on Visualization (EuroVis) 2015},
doi = {10.1111/cgf.12614},
editor = {H Carr and K-L Ma},
url = {http://dx.doi.org/10.1111/cgf.12614},
author = {Pretorius, AJ and Khan, IA and Errington, RJ},
abstract = {Cell lineages describe the developmental history of cell populations and are produced by combining time-lapse imaging and image processing. Biomedical researchers study cell lineages to understand fundamental processes, such as cell differentiation and the pharmacodynamic action of anticancer agents. Yet, the interpretation of cell lineages is hindered by their complexity and insufficient capacity for visual analysis. We present a novel approach for interactive visualisation of cell lineages. Based on an understanding of cellular biology and live-cell imaging methodology, we identify three requirements: multimodality (cell lineages combine spatial, temporal, and other properties), symmetry (related to lineage branching structure), and synchrony (related to temporal alignment of cellular events). We address these by combining visual summaries of the spatiotemporal behaviour of an arbitrary number of lineages, including variation from average behaviour, with node-link representations that emphasise the presence or absence of symmetry and synchrony. We illustrate the merit of our approach by presenting a real-world case study where the cytotoxic action of the anticancer drug topotecan was determined.},
keywords = {visualisation; data visualisation; information visualisation; biological data; cell lineage; cell biology},
issn = {0167-7055}
}
Large, high-resolution displays (LHRDs) allow orders of magnitude more data to be visualized at a time than ordinary computer displays. Previous research is inconclusive about the circumstances under which LHRDs are beneficial and lacks behavioural data to explain inconsistencies in the findings. We conducted an experiment in which participants searched maps for densely or sparsely distributed targets, using 2 million pixel (0.4m × 0.3m), 12 million pixel (1.3m × 0.7m) and 54 million pixel displays (3.0m × 1.3m). Display resolution did not affect the speed at which dense targets were found, but participants found sparse targets in easily identifiable regions of interest 30% faster with the 54-million pixel display than with the other displays. This was because of the speed advantage conferred by physical navigation and the fact that the whole dataset fitted onto the 54-million pixel display. Contrary to expectations, participants found targets at a similar speed and interacted in a similar manner (mostly short panning movements) with the 2- and 12-million pixel displays even though the latter provided more opportunity for physical navigation, though this may have been because panning used velocity-based control. We are applying these findings to the design of a virtual microscope for the diagnosis of diseases such as cancer.
@article{wrro85118,
pages = {137--147},
volume = {14},
journal = {Information Visualization},
month = {April},
title = {Performance and interaction behaviour during visual search on large, high-resolution displays},
publisher = {SAGE},
number = {2},
note = {(c) 2013, The Author(s). This is an author produced version of a paper published in Information Visualization. Uploaded in accordance with the publisher's self-archiving policy.},
doi = {10.1177/1473871613500978},
year = {2015},
abstract = {Large, high-resolution displays (LHRDs) allow orders of magnitude more data to be visualized at a time than ordinary computer displays. Previous research is inconclusive about the circumstances under which LHRDs are beneficial and lacks behavioural data to explain inconsistencies in the findings. We conducted an experiment in which participants searched maps for densely or sparsely distributed targets, using 2 million pixel (0.4m {$\times$} 0.3m), 12 million pixel (1.3m {$\times$} 0.7m) and 54 million pixel displays (3.0m {$\times$} 1.3m). Display resolution did not affect the speed at which dense targets were found, but participants found sparse targets in easily identifiable regions of interest 30\% faster with the 54-million pixel display than with the other displays. This was because of the speed advantage conferred by physical navigation and the fact that the whole dataset fitted onto the 54-million pixel display. Contrary to expectations, participants found targets at a similar speed and interacted in a similar manner (mostly short panning movements) with the 2- and 12-million pixel displays even though the latter provided more opportunity for physical navigation, though this may have been because panning used velocity-based control. We are applying these findings to the design of a virtual microscope for the diagnosis of diseases such as cancer.},
keywords = {Large high-resolution displays, gigapixel images, interaction behaviour, physical navigation, visual search, histopathology},
issn = {1473-8716},
url = {http://dx.doi.org/10.1177/1473871613500978},
author = {Ruddle, RA and Thomas, RG and Randell, RS and Quirke, P and Treanor, D}
}
Understanding the mechanisms of induced nuclear fission for a broad range of neutron energies could help resolve fundamental science issues, such as the formation of elements in the universe, but could also have a large impact on societal applications in energy production or nuclear waste management. The goal of this paper is to set up the foundations of a microscopic theory to study the static aspects of induced fission as a function of the excitation energy of the incident neutron, from thermal to fast neutrons. To account for the high excitation energy of the compound nucleus, we employ a statistical approach based on finite temperature nuclear density functional theory with Skyrme energy densities, which we benchmark on the Pu239(n,f) reaction. We compute the evolution of the least-energy fission pathway across multidimensional potential energy surfaces with up to five collective variables as a function of the nuclear temperature and predict the evolution of both the inner and the outer fission barriers as a function of the excitation energy of the compound nucleus. We show that the coupling to the continuum induced by the finite temperature is negligible in the range of neutron energies relevant for many applications of neutron-induced fission. We prove that the concept of quantum localization introduced recently can be extended to T > 0, and we apply the method to study the interaction energy and total kinetic energy of fission fragments as a function of the temperature for the most probable fission. While large uncertainties in theoretical modeling remain, we conclude that a finite temperature nuclear density functional may provide a useful framework to obtain accurate predictions of fission fragment properties.
@article{wrro84783,
number = {3},
note = {{\copyright} 2015, American Physical Society. Reproduced in accordance with the publisher's self-archiving policy.},
year = {2015},
doi = {10.1103/PhysRevC.91.034327},
month = {March},
journal = {Physical Review C: Nuclear Physics},
volume = {91},
publisher = {American Physical Society},
title = {Description of induced nuclear fission with Skyrme energy functionals. II. Finite temperature effects},
issn = {0556-2813},
keywords = {Fission; Topology; Joint Contour Net},
abstract = {Understanding the mechanisms of induced nuclear fission for a broad range of neutron energies could help resolve fundamental science issues, such as the formation of elements in the universe, but could also have a large impact on societal applications in energy production or nuclear waste management. The goal of this paper is to set up the foundations of a microscopic theory to study the static aspects of induced fission as a function of the excitation energy of the incident neutron, from thermal to fast neutrons. To account for the high excitation energy of the compound nucleus, we employ a statistical approach based on finite temperature nuclear density functional theory with Skyrme energy densities, which we benchmark on the Pu239(n,f) reaction. We compute the evolution of the least-energy fission pathway across multidimensional potential energy surfaces with up to five collective variables as a function of the nuclear temperature and predict the evolution of both the inner and the outer fission barriers as a function of the excitation energy of the compound nucleus. We show that the coupling to the continuum induced by the finite temperature is negligible in the range of neutron energies relevant for many applications of neutron-induced fission. We prove that the concept of quantum localization introduced recently can be extended to T{\ensuremath{>}}0, and we apply the method to study the interaction energy and total kinetic energy of fission fragments as a function of the temperature for the most probable fission. While large uncertainties in theoretical modeling remain, we conclude that a finite temperature nuclear density functional may provide a useful framework to obtain accurate predictions of fission fragment properties.},
author = {Schunck, N and Duke, DJ and Carr, H},
url = {http://dx.doi.org/10.1103/PhysRevC.91.034327}
}
Cellular pathologists are doctors who diagnose disease by using a microscope to examine glass slides containing thin sections of human tissue. These slides can be digitised and viewed on a computer, promising benefits in both efficiency and safety. Despite this, uptake of digital pathology for diagnostic work has been slow, with use largely restricted to second opinions, education, and external quality assessment schemes. To understand the barriers and facilitators to the introduction of digital pathology, we have undertaken an interview study with nine consultant pathologists. Interviewees were able to identify a range of potential benefits of digital pathology, with a particular emphasis on easier access to slides. Amongst the barriers to use, a key concern was lack of familiarity, not only in terms of becoming familiar with the technology but also in learning how to adjust their diagnostic skills to this new medium. The findings emphasise the need to ensure adequate training and support and the potential benefit of allowing parallel use of glass and digital slides while pathologists are on the learning curve.
@article{wrro86602,
doi = {10.3233/978-1-61499-564-7-443},
year = {2015},
note = {{\copyright} 2015, Author(s). This is an author produced version of a paper published in Studies in Health Technology and Informatics. Uploaded in accordance with the publisher's self-archiving policy.},
title = {Barriers and facilitators to the introduction of digital pathology for diagnostic work},
publisher = {IOS Press},
pages = {443 -- 447},
volume = {216},
journal = {Studies in Health Technology and Informatics},
month = {March},
url = {http://dx.doi.org/10.3233/978-1-61499-564-7-443},
author = {Randell, RS and Ruddle, RA and Treanor, D},
abstract = {Cellular pathologists are doctors who diagnose disease by using a microscope to examine glass slides containing thin sections of human tissue. These slides can be digitised and viewed on a computer, promising benefits in both efficiency and safety. Despite this, uptake of digital pathology for diagnostic work has been slow, with use largely restricted to second opinions, education, and external quality assessment schemes. To understand the barriers and facilitators to the introduction of digital pathology, we have undertaken an interview study with nine consultant pathologists. Interviewees were able to identify a range of potential benefits of digital pathology, with a particular emphasis on easier access to slides. Amongst the barriers to use, a key concern was lack of familiarity, not only in terms of becoming familiar with the technology but learning how to adjust their diagnostic skills to this new medium. The findings emphasise the need to ensure adequate training and support and the potential benefit of allowing parallel use of glass slides and digital while pathologists are on the learning curve.},
issn = {0926-9630},
keywords = {Informatics; Pathology; Microscopy; Qualitative Research; Learning Curve}
}
Performing diagnoses using virtual slides can take pathologists significantly longer than with glass slides, presenting a substantial barrier to the use of virtual slides in routine practice. Given the benefits in pathology workflow efficiency and safety that virtual slides promise, it is important to understand reasons for this difference and identify opportunities for improvement. The effect of display resolution on time to diagnosis with virtual slides has not previously been explored. The aim of this study was to assess the effect of display resolution on time to diagnosis with virtual slides. Nine pathologists participated in a counterbalanced crossover study, viewing axillary lymph node slides on a microscope, a 23-in 2.3-megapixel single-screen display and a three-screen 11-megapixel display consisting of three 27-in displays. Time to diagnosis and time to first target were faster on the microscope than on the single- and three-screen displays. For time to first target, however, there was no significant difference between the microscope and the three-screen display, while the time taken on the single-screen display was significantly higher than that on the microscope. The results suggest that a digital pathology workstation with an increased number of pixels may make it easier to identify where cancer is located in the initial slide overview, enabling quick location of diagnostically relevant regions of interest. However, when a comprehensive, detailed search of a slide has to be made, increased resolution may not offer any additional benefit.
@article{wrro80899,
year = {2015},
doi = {10.1007/s10278-014-9726-8},
number = {1},
note = {{\copyright} Society for Imaging Informatics in Medicine 2014. This is an author produced version of a paper accepted for publication in Journal of Digital Imaging. Uploaded in accordance with the publisher's self-archiving policy. The final publication is available at Springer via http://dx.doi.org/10.1007/s10278-014-9726-8},
publisher = {Springer Verlag},
title = {Effect of display resolution on time to diagnosis with virtual pathology slides in a systematic search task},
journal = {Journal of Digital Imaging},
volume = {28},
pages = {68 -- 76},
abstract = {Performing diagnoses using virtual slides can take pathologists significantly longer than with glass slides, presenting a significant barrier to the use of virtual slides in routine practice. Given the benefits in pathology workflow efficiency and safety that virtual slides promise, it is important to understand reasons for this difference and identify opportunities for improvement. The effect of display resolution on time to diagnosis with virtual slides has not previously been explored. The aim of this study was to assess the effect of display resolution on time to diagnosis with virtual slides. Nine pathologists participated in a counterbalanced crossover study, viewing axillary lymph node slides on a microscope, a 23-in 2.3-megapixel single-screen display and a three-screen 11-megapixel display consisting of three 27-in displays. Time to diagnosis and time to first target were faster on the microscope than on the single and three-screen displays. There was no significant difference between the microscope and the three-screen display in time to first target, while the time taken on the single-screen display was significantly higher than that on the microscope. The results suggest that a digital pathology workstation with an increased number of pixels may make it easier to identify where cancer is located in the initial slide overview, enabling quick location of diagnostically relevant regions of interest. However, when a comprehensive, detailed search of a slide has to be made, increased resolution may not offer any additional benefit.},
issn = {0897-1889},
keywords = {Digital pathology; Pathology; Virtual slides; Whole slide imaging; Telepathology; Time to diagnosis},
url = {http://dx.doi.org/10.1007/s10278-014-9726-8},
author = {Randell, R and Ambepitiya, T and Mello-Thoms, C and Ruddle, RA and Brettle, D and Thomas, RG and Treanor, D}
}
Eighty years after its experimental discovery, a description of induced nuclear fission based solely on the interactions between neutrons and protons and quantum many-body methods still poses formidable challenges. The goal of this paper is to contribute to the development of a predictive microscopic framework for the accurate calculation of static properties of fission fragments for hot fission and thermal or slow neutrons. To this end, we focus on the Pu239(n,f) reaction and employ nuclear density functional theory with Skyrme energy densities. Potential energy surfaces are computed at the Hartree-Fock-Bogoliubov approximation with up to five collective variables. We find that the triaxial degree of freedom plays an important role, both near the fission barrier and at scission. The impact of the parametrization of the Skyrme energy density and the role of pairing correlations on deformation properties from the ground state up to scission are also quantified. We introduce a general template for the quantitative description of fission fragment properties. It is based on the careful analysis of scission configurations, using both advanced topological methods and recently proposed quantum many-body techniques. We conclude that an accurate prediction of fission fragment properties at low incident neutron energies, although technologically demanding, should be within the reach of current nuclear density functional theory.
@article{wrro81690,
doi = {10.1103/PhysRevC.90.054305},
year = {2014},
number = {5},
note = {(c) 2014, American Physical Society. Reproduced in accordance with the publisher's self-archiving policy.},
publisher = {American Physical Society},
title = {Description of induced nuclear fission with Skyrme energy functionals: static potential energy surfaces and fission fragment properties},
volume = {90},
journal = {Physical Review C: Nuclear Physics},
month = {November},
author = {Schunck, N and Duke, DJ and Carr, H and Knoll, A},
url = {http://dx.doi.org/10.1103/PhysRevC.90.054305},
issn = {0556-2813},
abstract = {Eighty years after its experimental discovery, a description of induced nuclear fission based solely on the interactions between neutrons and protons and quantum many-body methods still poses formidable challenges. The goal of this paper is to contribute to the development of a predictive microscopic framework for the accurate calculation of static properties of fission fragments for hot fission and thermal or slow neutrons. To this end, we focus on the Pu239(n,f) reaction and employ nuclear density functional theory with Skyrme energy densities. Potential energy surfaces are computed at the Hartree-Fock-Bogoliubov approximation with up to five collective variables. We find that the triaxial degree of freedom plays an important role, both near the fission barrier and at scission. The impact of the parametrization of the Skyrme energy density and the role of pairing correlations on deformation properties from the ground state up to scission are also quantified. We introduce a general template for the quantitative description of fission fragment properties. It is based on the careful analysis of scission configurations, using both advanced topological methods and recently proposed quantum many-body techniques. We conclude that an accurate prediction of fission fragment properties at low incident neutron energies, although technologically demanding, should be within the reach of current nuclear density functional theory.}
}
Digital pathology promises a number of efficiency benefits in surgical pathology, yet the longer time required to review a virtual slide than a glass slide currently represents a significant barrier to the routine use of digital pathology. We aimed to create a novel workstation that enables pathologists to view a case as quickly as on the conventional microscope. The Leeds Virtual Microscope (LVM) was evaluated using a mixed factorial experimental design. Twelve consultant pathologists took part, each viewing one long cancer case (12-25 slides) on the LVM and one on a conventional microscope. Total time taken and diagnostic confidence were similar for the microscope and LVM, as was the mean slide viewing time. On the LVM, participants spent a significantly greater proportion of the total task time viewing slides and revisited slides more often. The unique design of the LVM, enabling real-time rendering of virtual slides while providing users with a quick and intuitive way to navigate within and between slides, makes use of digital pathology in routine practice a realistic possibility. With further practice on the system, diagnostic efficiency on the LVM is likely to increase further.
@article{wrro80933,
note = {{\copyright} 2014, WB Saunders. This is an author produced version of a paper published in Human Pathology. Uploaded in accordance with the publisher's self-archiving policy.},
number = {10},
year = {2014},
doi = {10.1016/j.humpath.2014.06.017},
pages = {2101--2106},
journal = {Human Pathology},
month = {October},
volume = {45},
title = {Diagnosis of major cancer resection specimens with virtual slides: Impact of a novel digital pathology workstation},
publisher = {W.B. Saunders},
author = {Randell, R and Ruddle, RA and Thomas, RG and Mello-Thoms, C and Treanor, D},
url = {http://dx.doi.org/10.1016/j.humpath.2014.06.017},
issn = {0046-8177},
keywords = {Digital pathology; Telepathology; Time to diagnosis; Virtual slides; Whole slide imaging},
abstract = {Digital pathology promises a number of benefits in efficiency in surgical pathology, yet the longer time required to review a virtual slide than a glass slide currently represents a significant barrier to the routine use of digital pathology. We aimed to create a novel workstation that enables pathologists to view a case as quickly as on the conventional microscope. The Leeds Virtual Microscope (LVM) was evaluated using a mixed factorial experimental design. Twelve consultant pathologists took part, each viewing one long cancer case (12-25 slides) on the LVM and one on a conventional microscope. Total time taken and diagnostic confidence were similar for the microscope and LVM, as was the mean slide viewing time. On the LVM, participants spent a significantly greater proportion of the total task time viewing slides and revisited slides more often. The unique design of the LVM, enabling real-time rendering of virtual slides while providing users with a quick and intuitive way to navigate within and between slides, makes use of digital pathology in routine practice a realistic possibility. With further practice with the system, diagnostic efficiency on the LVM is likely to increase yet more.}
}
Evaluation, solved and unsolved problems, and future directions are popular themes pervading the visualization community over the last decade. The top unsolved problem in both scientific and information visualization was the subject of an IEEE Visualization Conference panel in 2004. The future of graphics hardware was another important topic of discussion the same year. The subject of how to evaluate visualization returned a few years later. Chris Johnson published a list of 10 top problems in scientific visualization research. This was followed up by a report of both past achievements and future challenges in visualization research, as well as financial support recommendations to the National Science Foundation (NSF) and the National Institutes of Health (NIH). Chen recently published the first list of top unsolved information visualization problems. Future research directions of topology-based visualization were also a major theme of a workshop on topology-based methods. Laramee and Kosara published a list of top future challenges in human-centered visualization.
@incollection{wrro144593,
note = {{\copyright} Springer-Verlag London 2014. This is a post-peer-review, pre-copyedited version of book chapter published in Scientific Visualization. The final authenticated version is available online at: https://doi.org/10.1007/978-1-4471-6497-5\_19},
series = {Mathematics and Visualization},
doi = {10.1007/978-1-4471-6497-5\_19},
year = {2014},
pages = {205--211},
volume = {37},
month = {September},
journal = {Mathematics and Visualization},
title = {Future Challenges and Unsolved Problems in Multi-field Visualization},
publisher = {Springer, London},
keywords = {Tensor Field; Graphic Hardware; Display Primary; Scientific Visualization; Visual Metaphor},
abstract = {Evaluation, solved and unsolved problems, and future directions are popular themes pervading the visualization community over the last decade. The top unsolved problem in both scientific and information visualization was the subject of an IEEE Visualization Conference panel in 2004. The future of graphics hardware was another important topic of discussion the same year. The subject of how to evaluate visualization returned a few years later. Chris Johnson published a list of 10 top problems in scientific visualization research. This was followed up by report of both past achievements and future challenges in visualization research as well as financial support recommendations to the National Science Foundation (NSF) and National Institute of Health (NIH). Chen recently published the first list of top unsolved information visualization problems. Future research directions of topology-based visualization was also a major theme of a workshop on topology-based methods. Laramee and Kosara published a list of top future challenges in human-centered visualization.},
author = {Laramee, RS and Carr, H and Chen, M and Hauser, H and Linsen, L and Mueller, K and Natarajan, V and Obermaier, H and Peikert, R and Zhang, E},
isbn = {978-1-4471-6496-8},
issn = {1612-3786},
url = {https://eprints.whiterose.ac.uk/id/eprint/144593/}
}
Codes for computational science and downstream analysis (visualization and/or statistical modelling) have historically been dominated by imperative thinking, but this situation is evolving, both through adoption of higher-level tools such as Matlab, and through some adoption of functional ideas in the next generation of toolkits being driven by the vision of extreme-scale computing. However, this is still a long way from seeing a functional language like Haskell used in a live application. This paper makes three contributions to functional programming in computational science. First, we describe how use of Haskell was interleaved in the development of the first practical approach to multifield topology, and its application to the analysis of data from nuclear simulations that has led to new insight into fission. Second, we report subsequent developments of the functional code (i) improving sequential performance to approach that of an imperative implementation, and (ii) the introduction of parallelism through four skeletons exhibiting good scaling and different time/space trade-offs. Finally we consider the broader question of how, where, and why functional programming may - or may not - find further use in computational science.
@misc{wrro79906,
year = {2014},
doi = {10.1145/2636228.2636237},
editor = {M Sheeran and R Newton},
booktitle = {The 3rd ACM SIGPLAN Workshop on Functional High-Performance Computing},
month = {September},
journal = {Proceedings of the ACM Workshop on Functional High-Performance Computing},
pages = {11--21},
publisher = {ACM Press},
title = {Parallel Computation of Multifield Topology: Experience of Haskell in a Computational Science Application},
abstract = {Codes for computational science and downstream analysis (visualization and/or statistical modelling) have historically been dominated by imperative thinking, but this situation is evolving, both through adoption of higher-level tools such as Matlab, and through some adoption of functional ideas in the next generation of toolkits being driven by the vision of extreme-scale computing. However, this is still a long way from seeing a functional language like Haskell used in a live application. This paper makes three contributions to functional programming in computational science. First, we describe how use of Haskell was interleaved in the development of the first practical approach to multifield topology, and its application to the analysis of data from nuclear simulations that has led to new insight into fission. Second, we report subsequent developments of the functional code (i) improving sequential performance to approach that of an imperative implementation, and (ii) the introduction of parallelism through four skeletons exhibiting good scaling and different time/space trade-offs. Finally we consider the broader question of how, where, and why functional programming may - or may not - find further use in computational science.},
keywords = {Computational topology; joint contour net; Haskell; performance},
url = {http://dx.doi.org/10.1145/2636228.2636237},
isbn = {978-1-4503-3040-4},
author = {Duke, DJ and Hosseini, F and Carr, H}
}
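The skeleton-based parallelism described in the abstract above is language-agnostic: split the data into blocks, analyse each block in parallel (a map/farm skeleton), then combine the partial results with an associative merge (a reduction skeleton). The following is a minimal, hypothetical Python analogue of that pattern, not the authors' Haskell implementation; analyse and merge are illustrative placeholders.

from functools import reduce
from multiprocessing import Pool

def analyse(block):
    # placeholder per-block analysis; in the paper this would be a
    # topological computation over one portion of the data
    return {"min": min(block), "max": max(block)}

def merge(a, b):
    # associative merge of partial results, as a reduction skeleton requires
    return {"min": min(a["min"], b["min"]), "max": max(a["max"], b["max"])}

def skeleton(blocks, workers=4):
    with Pool(workers) as pool:
        partial = pool.map(analyse, blocks)  # map/farm skeleton
    return reduce(merge, partial)            # reduction skeleton

if __name__ == "__main__":
    data = list(range(1000))
    print(skeleton([data[i:i + 250] for i in range(0, 1000, 250)]))

The time/space trade-offs the paper reports come from how such skeletons schedule and retain partial results, which this toy sketch does not model.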
The orientation of fibers in assemblies such as nonwovens has a major influence on the anisotropy of properties of the bulk structure and is strongly influenced by the processes used to manufacture the fabric. To build a detailed understanding of a fabric's geometry and architecture it is important that fiber orientation in three dimensions is evaluated since out-of-plane orientations may also contribute to the physical properties of the fabric. In this study, a technique for measuring fiber segment orientation as proposed by Eberhardt and Clarke is implemented and experimentally studied based on analysis of X-ray computed microtomographic data. Fiber segment orientation distributions were extracted from volumetric X-ray microtomography data sets of hydroentangled nonwoven fabrics manufactured from parallel-laid, cross-laid, and air-laid webs. Spherical coordinates represented the orientation of individual fibers. Physical testing of the samples by means of zero-span tensile testing and z-directional tensile testing was employed to compare with the computed results.
@article{wrro83459,
publisher = {Cambridge University Press},
title = {Three-Dimensional Fiber Segment Orientation Distribution Using X-Ray Microtomography},
month = {August},
journal = {Microscopy and Microanalysis},
volume = {20},
pages = {1294--1303},
year = {2014},
doi = {10.1017/S1431927614000695},
note = {{\copyright} Microscopy Society of America 2014. This is an author produced version of a paper published in Microscopy and Microanalysis. Uploaded in accordance with the publisher's self-archiving policy},
number = {4},
url = {http://dx.doi.org/10.1017/S1431927614000695},
author = {Tausif, M and Duffy, B and Carr, H and Grishanov, S and Russell, SJ},
abstract = {The orientation of fibers in assemblies such as nonwovens has a major influence on the anisotropy of properties of the bulk structure and is strongly influenced by the processes used to manufacture the fabric. To build a detailed understanding of a fabric's geometry and architecture it is important that fiber orientation in three dimensions is evaluated since out-of-plane orientations may also contribute to the physical properties of the fabric. In this study, a technique for measuring fiber segment orientation as proposed by Eberhardt and Clarke is implemented and experimentally studied based on analysis of X-ray computed microtomographic data. Fiber segment orientation distributions were extracted from volumetric X-ray microtomography data sets of hydroentangled nonwoven fabrics manufactured from parallel-laid, cross-laid, and air-laid webs. Spherical coordinates represented the orientation of individual fibers. Physical testing of the samples by means of zero-span tensile testing and z-directional tensile testing was employed to compare with the computed results.},
keywords = {Orientation distribution; Fiber; Nonwovens; Three dimensional; X-ray microtomography; Structure; Hydroentanglement},
issn = {1431-9276}
}
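As a rough illustration of the representation used above, a fiber segment's orientation can be reduced to two spherical angles, and a histogram of those angles over all segments gives an orientation distribution. A minimal sketch, assuming undirected segments (antipodal directions folded together); the conventions are illustrative, not necessarily those of the paper.

import numpy as np

def segment_angles(p0, p1):
    # unit direction of the fiber segment between endpoints p0 and p1
    v = np.asarray(p1, float) - np.asarray(p0, float)
    v /= np.linalg.norm(v)
    if v[2] < 0:
        v = -v                            # fold antipodal directions together
    theta = np.arccos(v[2])               # polar angle from the z (thickness) axis
    phi = np.arctan2(v[1], v[0]) % np.pi  # in-plane azimuth, folded to [0, pi)
    return theta, phi

# e.g. an in-plane segment: segment_angles((0, 0, 0), (1, 1, 0)) -> (pi/2, pi/4)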
This paper presents the fundamental mathematics to determine the minimum crack width detectable with a terrestrial laser scanner in unit-based masonry. Orthogonal offset, interval scan angle, crack orientation, and crack depth are the main parameters. The theoretical work is benchmarked against laboratory tests using 4 samples with predesigned crack widths of 1-7 mm scanned at orthogonal distances of 5.0-12.5 m and at angles of 0°-30°. Results showed that absolute errors of crack width were mostly less than 1.37 mm when the orthogonal distance varied from 5.0 to 7.5 m but significantly increased for greater distances. The orthogonal distance had a disproportionately negative effect compared to the scan angle.
@article{wrro79316,
year = {2014},
doi = {10.1016/j.ndteint.2013.11.001},
note = {(c) 2014, Elsevier. NOTICE: this is the author's version of a work that was accepted for publication in NDT and E International. Changes resulting from the publishing process, such as peer review, editing, corrections, structural formatting, and other quality control mechanisms may not be reflected in this document. Changes may have been made to this work since it was submitted for publication. A definitive version was subsequently published in NDT and E International, 62, 2014, 10.1016/j.ndteint.2013.11.001
},
publisher = {Elsevier},
title = {Crack detection limits in unit based masonry with terrestrial laser scanning},
journal = {NDT and E International},
month = {March},
volume = {62},
pages = {66 -- 76},
url = {http://dx.doi.org/10.1016/j.ndteint.2013.11.001},
author = {Laefer, DF and Truong-Hong, L and Carr, H and Singh, M},
abstract = {This paper presents the fundamental mathematics to determine the minimum crack width detectable with a terrestrial laser scanner in unit-based masonry. Orthogonal offset, interval scan angle, crack orientation, and crack depth are the main parameters. The theoretical work is benchmarked against laboratory tests using 4 samples with predesigned crack widths of 1-7 mm scanned at orthogonal distances of 5.0-12.5 m and at angles of 0 -30. Results showed that absolute errors of crack width were mostly less than 1.37 mm when the orthogonal distance varied 5.0-7.5 m but significantly increased for greater distances. The orthogonal distance had a disproportionately negative effect compared to the scan angle.},
issn = {0963-8695},
keywords = {Terrestrial laser scanning; Point cloud data; Crack detection; Structural health monitoring; Condition assessment; Masonry}
}
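For intuition about why detectability degrades with distance and angle, consider the spacing of successive laser points on a flat wall: at orthogonal offset d and scan angle theta from the wall normal, the lateral hit position is d*tan(theta), so the spacing grows as d/cos^2(theta) per unit of angular increment, and a crack narrower than the local point spacing can fall between samples. This is a back-of-envelope sketch of the sampling geometry, not the paper's exact formulation; the 0.01-degree increment below is an assumed value.

import math

def point_spacing(d, delta_theta_deg, theta_deg):
    # lateral spacing of consecutive samples on a planar wall:
    # x = d * tan(theta)  =>  dx/dtheta = d / cos^2(theta)
    return d * math.radians(delta_theta_deg) / math.cos(math.radians(theta_deg)) ** 2

# at 7.5 m, a 0.01 degree step, and a 30 degree scan angle:
print(point_spacing(7.5, 0.01, 30))  # ~0.0017 m, i.e. about 1.7 mm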
As with individual fields, one approach to visualizing multifields is to analyze the field and identify features. While some work has been carried out in detecting features in multifields, any discussion of multifield analysis must also identify techniques from single fields that can be extended appropriately.
@incollection{wrro97576,
year = {2014},
booktitle = {Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization},
doi = {10.1007/978-1-4471-6497-5\_18},
editor = {CD Hansen and M Chen and CR Johnson and AE Kaufman and H Hagen},
series = {Mathematics and Visualization},
journal = {Mathematics and Visualization},
pages = {197--204},
publisher = {Springer-Verlag},
title = {Feature analysis in multifields},
address = {London},
url = {http://dx.doi.org/10.1007/978-1-4471-6497-5\%5f18},
isbn = {978-1-4471-6496-8},
author = {Carr, H},
abstract = {As with individual fields, one approach to visualizing multifields is to analyze the field and identify features. While some work has been carried out in detecting features in multifields, any discussion of multifield analysis must also identify techniques from single fields that can be extended appropriately.},
issn = {1612-3786}
}
Computational topology is of interest in visualization because it summarizes useful global properties of a dataset. The greatest need for such abstractions is in massive data, and to date most implementations have opted for low-level languages to obtain space and time-efficient implementations. Such code is complex, and is becoming even more so with the need to operate efficiently on a range of parallel hardware. Motivated by rapid advances in functional programming and compiler technology, this chapter investigates whether a shift in programming paradigm could reduce the complexity of the task. Focusing on contour tree generation as a case study, the chapter makes three contributions. First, it sets out the development of a concise functional implementation of the algorithm. Second, it shows that the sequential functional code can be tuned to match the performance of an imperative implementation, albeit at some cost in code clarity. Third, it outlines new possibilities for parallelisation using functional tools, and notes similarities between functional abstractions and emerging ideas in extreme-scale visualization.
@incollection{wrro81914,
pages = {73 -- 88},
publisher = {Springer},
doi = {10.1007/978-3-319-04099-8\_5},
editor = {P-T Bremer and I Hotz and V Pascucci and R Peikert},
booktitle = {Topology-Based Methods in Visualization III},
year = {2014},
title = {Computational topology via functional programming: a baseline analysis},
url = {https://eprints.whiterose.ac.uk/id/eprint/81914/},
isbn = {978-3-319-04099-8},
author = {Duke, DJ and Carr, H},
abstract = {Computational topology is of interest in visualization because it summarizes useful global properties of a dataset. The greatest need for such abstractions is in massive data, and to date most implementations have opted for low-level languages to obtain space and time-efficient implementations. Such code is complex, and is becoming even more so with the need to operate efficiently on a range of parallel hardware. Motivated by rapid advances in functional programming and compiler technology, this chapter investigates whether a shift in programming paradigm could reduce the complexity of the task. Focusing on contour tree generation as a case study, the chapter makes three contributions. First, it sets out the development of a concise functional implementation of the algorithm. Second, it shows that the sequential functional code can be tuned to match the performance of an imperative implementation, albeit at some cost in code clarity. Third, it outlines new possibilities for parallelisation using functional tools, and notes similarities between functional abstractions and emerging ideas in extreme-scale visualization.}
}
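For readers unfamiliar with the case study, half of a contour tree (the join tree, which records how superlevel sets merge) can be computed by sweeping vertices from high to low value while tracking connectivity with union-find. A minimal 1D sketch in Python, purely illustrative; the chapter's implementation is in Haskell and handles general meshes.

def join_tree_1d(values):
    # sweep vertices from high to low; each vertex either starts a new
    # superlevel-set component (a local maximum) or merges existing ones
    order = sorted(range(len(values)), key=lambda i: values[i], reverse=True)
    parent = {}

    def find(i):
        while parent[i] != i:
            parent[i] = parent[parent[i]]  # path compression
            i = parent[i]
        return i

    arcs = []
    for i in order:
        parent[i] = i
        for j in (i - 1, i + 1):           # 1D neighbours
            if j in parent:                # neighbour already swept
                ri, rj = find(i), find(j)
                if ri != rj:
                    arcs.append((rj, i))   # component rooted at rj merges at i
                    parent[rj] = ri
    return arcs

# join_tree_1d([0, 3, 1, 4, 2]) -> [(3, 4), (1, 2), (4, 2), (2, 0)]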
Multifield visualization covers a range of data types that can be visualized with many different techniques. We summarize both the data types and the categories of techniques, and lay out the reasoning for dividing this Part into chapters by technique rather than by data type. As we have seen in the previous chapter, multifield visualization covers a broad range of types of data. It is therefore possible to discuss multifield visualization according to these data types, with each type covered in a separate chapter. However, it is also possible to approach the question by considering the techniques to be applied, many of which can be applied to multiple types of multifield data. In this chapter, we therefore discuss both ways of analysing multifield visualization techniques, and why we have chosen to proceed according to technique rather than type in the subsequent chapters.
@incollection{wrro97577,
publisher = {Springer-Verlag},
title = {Categorization},
address = {London},
journal = {Mathematics and Visualization},
pages = {111--117},
year = {2014},
editor = {CD Hansen and M Chen and CR Johnson and AE Kaufman and H Hagen},
doi = {10.1007/978-1-4471-6497-5\_11},
booktitle = {Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization},
series = {Mathematics and Visualization},
isbn = {978-1-4471-6496-8},
url = {http://dx.doi.org/10.1007/978-1-4471-6497-5\%5f11},
author = {Hauser, H and Carr, H},
abstract = {Multifield visualization covers a range of data types that can be visualized with many different techniques. We summarize both the data types and the categories of techniques, and lay out the reasoning for dividing this Part into chapters by technique rather than by data type. As we have seen in the previous chapter, multifield visualization covers a broad range of types of data. It is therefore possible to discuss multifield visualization according to these data types, with each type covered in a separate chapter. However, it is also possible to approach the question by considering the techniques to be applied, many of which can be applied to multiple types of multifield data. In this chapter, we therefore discuss both ways of analysing multifield visualization techniques, and why we have chosen to proceed according to technique rather than type in the subsequent chapters.},
issn = {1612-3786}
}
This study investigated the effects of six attributes, associated with simplicity or attractiveness, on route preference for three pedestrian journey types (everyday, leisure and tourist). Using stated choice preference experiments with computer generated scenes, participants were asked to choose one of a pair of routes showing either two levels of the same attribute (experiment 1) or different attributes (experiment 2). Contrary to predictions, vegetation was the most influential for both everyday and leisure journeys, and land use ranked much lower than expected in both cases. Turns ranked higher than decision points for everyday journeys as predicted, but the positions of both were lowered by initially unranked attributes. As anticipated, points of interest were most important for tourist trips, with the initially unranked attributes having less influence. This is the first time so many attributes have been compared directly, providing new information about the importance of the attributes for different journeys. © 2014 Springer International Publishing.
@misc{wrro80900,
year = {2014},
booktitle = { Spatial Cognition 2014},
doi = {10.1007/978-3-319-11215-2\_14},
editor = {C Freksa and B Nebel and M Hegarty and T Barkowsky},
note = {{\copyright} 2014, Springer Verlag. This is an author produced version of a paper published in Spatial Cognition IX: International Conference, Spatial Cognition 2014, Proceedings. Uploaded in accordance with the publisher's self-archiving policy.
The final publication is available at Springer via http://dx.doi.org/10.1007/978-3-319-11215-2\_14},
title = {Effect of simplicity and attractiveness on route selection for different journey types},
publisher = {Springer Verlag},
pages = {190 -- 205},
journal = {Spatial Cognition IX International Conference, Spatial Cognition 2014, Proceedings},
volume = {8684 L},
issn = {0302-9743},
url = {http://dx.doi.org/10.1007/978-3-319-11215-2\%5f14},
abstract = {This study investigated the effects of six attributes, associated with simplicity or attractiveness, on route preference for three pedestrian journey types (everyday, leisure and tourist). Using stated choice preference experiments with computer generated scenes, participants were asked to choose one of a pair of routes showing either two levels of the same attribute (experiment 1) or different attributes (experiment 2). Contrary to predictions, vegetation was the most influential for both everyday and leisure journeys, and land use ranked much lower than expected in both cases. Turns ranked higher than decision points for everyday journeys as predicted, but the positions of both were lowered by initially unranked attributes. As anticipated, points of interest were most important for tourist trips, with the initially unranked attributes having less influence. This is the first time so many attributes have been compared directly, providing new information about the importance of the attributes for different journeys. {\copyright} 2014 Springer International Publishing.},
keywords = {Attractiveness; pedestrian navigation; simplicity; wayfinding},
isbn = {9783319112145},
author = {Cook, S and Ruddle, RA}
}
How can the notion of topological structures for single scalar fields be extended to multifields? In this paper we propose a definition for such structures using the concepts of Pareto optimality and Pareto dominance. Given a set of piecewise-linear, scalar functions over a common simplicial complex of any dimension, our method finds regions of "consensus" among single fields' critical points and their connectivity relations. We show that our concepts are useful to data analysis on real-world examples originating from fluid-flow simulations; in two cases where the consensus of multiple scalar vortex predictors is of interest and in another case where one predictor is studied under different simulation parameters. We also compare the properties of our approach with current alternatives.
@article{wrro79280,
year = {2013},
doi = {10.1111/cgf.12121},
number = {3 Pt 3},
publisher = {Wiley},
title = {Towards multifield scalar topology based on pareto optimality},
month = {June},
journal = {Computer Graphics Forum},
volume = {32},
pages = {341 -- 350},
abstract = {How can the notion of topological structures for single scalar fields be extended to multifields? In this paper we propose a definition for such structures using the concepts of Pareto optimality and Pareto dominance. Given a set of piecewise-linear, scalar functions over a common simplicial complex of any dimension, our method finds regions of "consensus" among single fields' critical points and their connectivity relations. We show that our concepts are useful to data analysis on real-world examples originating from fluid-flow simulations; in two cases where the consensus of multiple scalar vortex predictors is of interest and in another case where one predictor is studied under different simulation parameters. We also compare the properties of our approach with current alternatives.},
issn = {0167-7055},
keywords = {Computer graphics; computational geometry and object modeling; geometric algorithms, languages, and systems},
url = {http://dx.doi.org/10.1111/cgf.12121},
author = {Huettenberger, L and Heine, C and Carr, H and Scheuermann, G and Garth, C}
}
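The key relation in the abstract above is Pareto dominance. A minimal sketch, assuming each point carries a tuple of values, one per scalar field; note that the paper defines its structures on piecewise-linear functions over a simplicial complex, which this vertex-wise toy does not capture.

def dominates(u, v):
    # u Pareto-dominates v: no field is worse, at least one is strictly better
    return all(a >= b for a, b in zip(u, v)) and any(a > b for a, b in zip(u, v))

def pareto_optima(value_tuples):
    # points dominated by no other point: candidates for "consensus" extrema
    return [i for i, v in enumerate(value_tuples)
            if not any(dominates(u, v) for j, u in enumerate(value_tuples) if j != i)]

# pareto_optima([(1, 2), (2, 1), (2, 2), (0, 0)]) -> [2]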
Physical locomotion provides internal (body-based) sensory information about the translational and rotational components of movement. This chapter starts by summarizing the characteristics of model-, small- and large-scale VE applications, and attributes of ecological validity that are important for the application of navigation research. The type of navigation participants performed, the scale and spatial extent of the environment, and the richness of the visual scene are used to provide a framework for a review of research into the effect of body-based information on navigation. The review resolves contradictions between previous studies' findings, identifies types of navigation interface that are suited to different applications, and highlights areas in which further research is needed. Applications that take place in small-scale environments, where maneuvering is the most demanding aspect of navigation, will benefit from full-walking interfaces. However, collision detection may not be needed because users avoid obstacles even when they are below eye-level. Applications that involve large-scale spaces (e.g., buildings or cities) just need to provide the translational component of body-based information, because it is only in unusual scenarios that the rotational component of body-based information produces any significant benefit. This opens up the opportunity of combining linear treadmill and walking-in-place interfaces with projection displays that provide a wide field of view.
@incollection{wrro86512,
year = {2013},
doi = {10.1007/978-1-4419-8432-6\_5},
booktitle = {Human Walking in Virtual Environments: Perception, Technology, and Applications},
editor = {F Steinicke and Y Visell and J Campos and A Lecuyer},
publisher = {Springer},
title = {The effect of translational and rotational body-based information on navigation},
address = {New York},
month = {May},
pages = {99--112},
keywords = {Translational; Rotational; Body-based information; Navigation; Cognition; Spatial knowledge},
abstract = {Physical locomotion provides internal (body-based) sensory information about the translational and rotational components of movement. This chapter starts by summarizing the characteristics of model-, small- and large-scale VE applications, and attributes of ecological validity that are important for the application of navigation research. The type of navigation participants performed, the scale and spatial extent of the environment, and the richness of the visual scene are used to provide a framework for a review of research into the effect of body-based information on navigation. The review resolves contradictions between previous studies' findings, identifies types of navigation interface that are suited to different applications, and highlights areas in which further research is needed. Applications that take place in small-scale environments, where maneuvering is the most demanding aspect of navigation, will benefit from full-walking interfaces. However, collision detection may not be needed because users avoid obstacles even when they are below eye-level. Applications that involve large-scale spaces (e.g., buildings or cities) just need to provide the translational component of body-based information, because it is only in unusual scenarios that the rotational component of body-based information produces any significant benefit. This opens up the opportunity of combining linear treadmill and walking-in-place interfaces with projection displays that provide a wide field of view.},
author = {Ruddle, RA},
isbn = {9781441984319},
url = {http://dx.doi.org/10.1007/978-1-4419-8432-6\%5f5}
}
This article provides longitudinal data for when participants learned to travel with a walking metaphor through virtual reality (VR) worlds, using interfaces that ranged from joystick-only, to linear and omnidirectional treadmills, and actual walking in VR. Three metrics were used: travel time, collisions (a measure of accuracy), and the speed profile. The time that participants required to reach asymptotic performance for traveling, and what that asymptote was, varied considerably between interfaces. In particular, when a world had tight turns (0.75 m corridors), participants who walked were more proficient than those who used a joystick to locomote and turned either physically or with a joystick, even after 10 minutes of training. The speed profile showed that this was caused by participants spending a notable percentage of the time stationary, irrespective of whether or not they frequently played computer games. The study shows how speed profiles can be used to help evaluate participants' proficiency with travel interfaces, highlights the need for training to be structured to address specific weaknesses in proficiency (e.g., start-stop movement), and for studies to measure and report that proficiency.
@article{wrro76922,
title = {Learning to Walk in Virtual Reality},
publisher = {Association for Computing Machinery},
volume = {10},
month = {May},
journal = {ACM Transactions on Applied Perception},
doi = {10.1145/2465780.2465785},
year = {2013},
note = {{\copyright} ACM, 2013. This is the author's version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in , ACM Transactions on Applied Perception VOL 10, ISS 2, (May 2013) http://dx.doi.org/10.1145/2465780.2465785 },
number = {2},
abstract = {This article provides longitudinal data for when participants learned to travel with a walking metaphor through virtual reality (VR) worlds, using interfaces that ranged from joystick-only, to linear and omnidirectional treadmills, and actual walking in VR. Three metrics were used: travel time, collisions (a measure of accuracy), and the speed profile. The time that participants required to reach asymptotic performance for traveling, and what that asymptote was, varied considerably between interfaces. In particular, when a world had tight turns (0.75 m corridors), participants who walked were more proficient than those who used a joystick to locomote and turned either physically or with a joystick, even after 10 minutes of training. The speed profile showed that this was caused by participants spending a notable percentage of the time stationary, irrespective of whether or not they frequently played computer games. The study shows how speed profiles can be used to help evaluate participants' proficiency with travel interfaces, highlights the need for training to be structured to address specific weaknesses in proficiency (e.g., start-stop movement), and for studies to measure and report that proficiency.},
keywords = {Experimentation; Human Factors; Performance; Virtual reality interfaces; navigation; travel; metrics},
issn = {1544-3558},
url = {http://dx.doi.org/10.1145/2465780.2465785},
author = {Ruddle, RA and Volkova, E and Buelthoff, HH}
}
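A speed profile of the kind used above can be derived directly from a time-stamped position trace; one of the derived measures is the percentage of time spent stationary. A minimal sketch, where the 0.1 m/s stationary threshold is an assumed illustrative value, not one taken from the paper.

import numpy as np

def percent_stationary(positions, timestamps, threshold=0.1):
    # percentage of elapsed time during which speed falls below threshold (m/s)
    p = np.asarray(positions, dtype=float)
    t = np.asarray(timestamps, dtype=float)
    dt = np.diff(t)
    speed = np.linalg.norm(np.diff(p, axis=0), axis=1) / dt
    return 100.0 * dt[speed < threshold].sum() / (t[-1] - t[0])

# e.g. percent_stationary([(0, 0), (0, 0), (1, 0)], [0.0, 1.0, 2.0]) -> 50.0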
Many data sets are sampled on regular lattices in two, three or more dimensions, and recent work has shown that statistical properties of these data sets must take into account the continuity of the underlying physical phenomena. However, the effects of quantization on the statistics have not yet been accounted for. This paper therefore reconciles the previous papers to the underlying mathematical theory, develops a mathematical model of quantized statistics of continuous functions, and proves convergence of geometric approximations to continuous statistics for regular sampling lattices. In addition, the computational cost of various approaches is considered, and recommendations made about when to use each type of statistic.
@article{wrro79281,
pages = {263 -- 277 (14)},
month = {February},
journal = {IEEE Transactions on Visualization and Computer Graphics},
volume = {19},
title = {Integrating isosurface statistics and histograms},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
number = {2},
note = {(c) 2013 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/ republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
year = {2013},
doi = {10.1109/TVCG.2012.118},
url = {http://dx.doi.org/10.1109/TVCG.2012.118},
author = {Duffy, B and Carr, HA and M{\"o}ller, T},
abstract = {Many data sets are sampled on regular lattices in two, three or more dimensions, and recent work has shown that statistical properties of these data sets must take into account the continuity of the underlying physical phenomena. However, the effects of quantization on the statistics have not yet been accounted for. This paper therefore reconciles the previous papers to the underlying mathematical theory, develops a mathematical model of quantized statistics of continuous functions, and proves convergence of geometric approximations to continuous statistics for regular sampling lattices. In addition, the computational cost of various approaches is considered, and recommendations made about when to use each type of statistic.},
keywords = {Frequency distribution; geometric statistics; histograms; integration},
issn = {1077-2626}
}
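The distinction the paper draws can be seen already in one dimension: a naive histogram counts discrete samples, whereas a continuous (piecewise-linear) histogram credits each bin with the fraction of every segment whose value range overlaps it. A toy sketch of that contrast, not the paper's isosurface-area formulation:

import numpy as np

def sample_histogram(values, bins):
    # naive: count discrete samples per bin, ignoring continuity
    return np.histogram(values, bins=bins)[0]

def pl_histogram(values, bins):
    # treat consecutive samples as linear segments; each segment distributes
    # one unit of weight across the bins its value range overlaps
    hist = np.zeros(len(bins) - 1)
    for a, b in zip(values[:-1], values[1:]):
        lo, hi = min(a, b), max(a, b)
        for k in range(len(bins) - 1):
            if hi > lo:
                overlap = max(0.0, min(hi, bins[k + 1]) - max(lo, bins[k]))
                hist[k] += overlap / (hi - lo)
            elif bins[k] <= lo < bins[k + 1]:
                hist[k] += 1.0          # flat segment: all weight to its bin
    return hist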
Aims: To create and evaluate a virtual reality (VR) microscope that is as efficient as the conventional microscope, seeking to support the introduction of digital slides into routine practice. Methods and results: A VR microscope was designed and implemented by combining ultra-high-resolution displays with VR technology, techniques for fast interaction, and high usability. It was evaluated using a mixed factorial experimental design with technology and task as within-participant variables and grade of histopathologist as a between-participant variable. Time to diagnosis was similar for the conventional and VR microscopes. However, there was a significant difference in the mean magnification used between the two technologies, with participants working at a higher level of magnification on the VR microscope. Conclusions: The results suggest that, with the right technology, efficient use of digital pathology for routine practice is a realistic possibility. Further work is required to explore what magnification is required on the VR microscope for histopathologists to identify diagnostic features, and the effect on this of the digital slide production process.
@article{wrro74853,
title = {Virtual reality microscope versus conventional microscope regarding time to diagnosis: an experimental study.},
publisher = {Wiley},
pages = {351--358},
journal = {Histopathology},
month = {January},
volume = {62},
year = {2013},
doi = {10.1111/j.1365-2559.2012.04323.x},
note = {{\copyright} 2013, Blackwell Publishing. This is an author produced version of a paper published in Histopathology. Uploaded in accordance with the publisher's self-archiving policy.},
number = {2},
author = {Randell, R and Ruddle, RA and Mello-Thoms, C and Thomas, RG and Quirke, P and Treanor, D},
url = {http://dx.doi.org/10.1111/j.1365-2559.2012.04323.x},
issn = {0309-0167},
abstract = {Aims: To create and evaluate a virtual reality (VR) microscope that is as efficient as the conventional microscope, seeking to support the introduction of digital slides into routine practice. Methods and results: A VR microscope was designed and implemented by combining ultra-high-resolution displays with VR technology, techniques for fast interaction, and high usability. It was evaluated using a mixed factorial experimental design with technology and task as within-participant variables and grade of histopathologist as a between-participant variable. Time to diagnosis was similar for the conventional and VR microscopes. However, there was a significant difference in the mean magnification used between the two technologies, with participants working at a higher level of magnification on the VR microscope. Conclusions: The results suggest that, with the right technology, efficient use of digital pathology for routine practice is a realistic possibility. Further work is required to explore what magnification is required on the VR microscope for histopathologists to identify diagnostic features, and the effect on this of the digital slide production process.}
}
Contour trees and Reeb graphs are firmly embedded in scientific visualization for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net and demonstrate that Contour Trees for individual variables can be extracted from the Joint Contour Net.
@misc{wrro79239,
pages = {161 -- 168},
journal = {2013 IEEE Pacific Visualization Symposium (PacificVis)},
title = {Joint contour nets: computation and properties},
publisher = {IEEE},
doi = {10.1109/PacificVis.2013.6596141},
booktitle = {2013 IEEE Pacific Visualization Symposium},
year = {2013},
author = {Carr, H and Duke, D},
isbn = {978-1-4673-4797-6},
keywords = {Computational topology; Contour analysis; contour tree; Joint Contour Net; Multivariate; Reeb graph; Reeb space},
abstract = {Contour trees and Reeb graphs are firmly embedded in scientific visualization for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net and demonstrate that Contour Trees for individual variables can be extracted from the Joint Contour Net.},
url = {http://dx.doi.org/10.1109/PacificVis.2013.6596141},
issn = {2165-8765}
}
Contour Trees and Reeb Graphs are firmly embedded in scientific visualisation for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net, and demonstrate some of the properties that make it practically useful for visualisation, including accelerating computation by exploiting a relationship with rasterisation in the range of the function.
@article{wrro79282,
journal = {IEEE Transactions on Visualization and Computer Graphics},
note = {(c) 2013 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other users, including reprinting/ republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
publisher = {Institute of Electrical and Electronics Engineers},
doi = {10.1109/TVCG.2013.269},
year = {2013},
title = {Joint contour nets},
abstract = {Contour Trees and Reeb Graphs are firmly embedded in scientific visualisation for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net, and demonstrate some of the properties that make it practically useful for visualisation, including accelerating computation by exploiting a relationship with rasterisation in the range of the function.},
keywords = {computational topology; contour tree; reeb graph; multivariate; contour analysis; reeb space; joint contour net},
issn = {1077-2626},
url = {http://dx.doi.org/10.1109/TVCG.2013.269},
author = {Duke, DJ and Carr, H}
}
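A minimal sketch of the quantization idea behind the Joint Contour Net described in the two entries above: round each field into discrete slabs, then group connected sample points that share the same tuple of slabs (here with union-find over an adjacency graph). This is a simplification for illustration; the actual algorithm operates on fragments of simplicial cells rather than raw vertices.

def quantize(values, width):
    # joint slab index: one quantized level per field
    return tuple(int(v // width) for v in values)

def joint_slabs(adjacency, point_values, width):
    # adjacency: {point: iterable of neighbours}; point_values: {point: (f1, f2, ...)}
    parent = {p: p for p in point_values}

    def find(p):
        while parent[p] != p:
            parent[p] = parent[parent[p]]
            p = parent[p]
        return p

    for p, nbrs in adjacency.items():
        for q in nbrs:
            if quantize(point_values[p], width) == quantize(point_values[q], width):
                parent[find(q)] = find(p)   # connected and in the same joint slab
    groups = {}
    for p in point_values:
        groups.setdefault(find(p), []).append(p)
    return list(groups.values())            # one group per candidate JCN node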
Traditional documentation capabilities of laser scanning technology can be further exploited for urban modeling through the transformation of resulting point clouds into solid models compatible for computational analysis. This article introduces such a technique through the combination of an angle criterion and voxelization. As part of that, a k-nearest neighbor (kNN) searching algorithm is implemented using a predefined number of kNN points combined with a maximum radius of the neighborhood, something not previously implemented. From this sample, points are categorized as boundary or interior points based on an angle criterion. Façade features are determined based on underlying vertical and horizontal grid voxels of the feature boundaries by a grid clustering technique. The complete building model involving all full voxels is generated by employing the Flying Voxel method to relabel voxels that are inside openings or outside the façade as empty voxels. Experimental results on three different buildings, using four distinct sampling densities showed successful detection of all openings, reconstruction of all building façades, and automatic filling of all improper holes. The maximum nodal displacement divergence was 1.6% compared to manually generated meshes from measured drawings. This fully automated approach rivals processing times of other techniques with the distinct advantage of extracting more boundary points, especially in less dense data sets (<175 points/m²), which may enable its more rapid exploitation of aerial laser scanning data and ultimately preclude needing a priori knowledge.
@article{wrro79317,
number = {2},
note = {(c) 2013, Wiley. This is the accepted version of the following article: Truong-Hong, L, Laefer, DF, Hinks, T and Carr, H () Combining an angle criterion with voxelization and the flying voxel method in reconstructing building models from LiDAR data. Computer-Aided Civil and Infrastructure Engineering, 28 (2). 112 - 129. ISSN 1093-9687, which has been published in final form at http://dx.doi.org/10.1111/j.1467-8667.2012.00761.x},
doi = {10.1111/j.1467-8667.2012.00761.x},
year = {2013},
volume = {28},
journal = {Computer-Aided Civil and Infrastructure Engineering},
pages = {112 -- 129},
publisher = {Wiley},
title = {Combining an angle criterion with voxelization and the flying voxel method in reconstructing building models from LiDAR data},
abstract = {Traditional documentation capabilities of laser scanning technology can be further exploited for urban modeling through the transformation of resulting point clouds into solid models compatible for computational analysis. This article introduces such a technique through the combination of an angle criterion and voxelization. As part of that, a k-nearest neighbor (kNN) searching algorithm is implemented using a predefined number of kNN points combined with a maximum radius of the neighborhood, something not previously implemented. From this sample, points are categorized as boundary or interior points based on an angle criterion. Fa{\cc}ade features are determined based on underlying vertical and horizontal grid voxels of the feature boundaries by a grid clustering technique. The complete building model involving all full voxels is generated by employing the Flying Voxel method to relabel voxels that are inside openings or outside the fa{\cc}ade as empty voxels. Experimental results on three different buildings, using four distinct sampling densities showed successful detection of all openings, reconstruction of all building fa{\cc}ades, and automatic filling of all improper holes. The maximum nodal displacement divergence was 1.6\% compared to manually generated meshes from measured drawings. This fully automated approach rivals processing times of other techniques with the distinct advantage of extracting more boundary points, especially in less dense data sets ({\ensuremath{<}}175 points/m2), which may enable its more rapid exploitation of aerial laser scanning data and ultimately preclude needing a priori knowledge.},
issn = {1093-9687},
url = {http://dx.doi.org/10.1111/j.1467-8667.2012.00761.x},
author = {Truong-Hong, L and Laefer, DF and Hinks, T and Carr, H}
}
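The kNN-plus-radius sampling and the angle criterion described above lend themselves to a compact illustration. The following is a minimal 2D sketch, not the paper's 3D implementation: it assumes numpy and scipy are available, and the parameter defaults (k, radius, angular gap threshold) are illustrative only. A point whose radius-capped neighborhood leaves a large angular gap around it is labelled a boundary point.

import numpy as np
from scipy.spatial import cKDTree

def boundary_points(pts, k=8, radius=0.5, gap_deg=90.0):
    # Label 2D points whose neighborhood has a large angular gap (sketch).
    tree = cKDTree(pts)
    n = len(pts)
    # k nearest neighbors, capped at `radius`; misses are reported as index n
    dist, idx = tree.query(pts, k=k + 1, distance_upper_bound=radius)
    boundary = np.zeros(n, dtype=bool)
    for i in range(n):
        nbrs = idx[i][(idx[i] != i) & (idx[i] < n)]  # drop self and misses
        if len(nbrs) < 2:
            boundary[i] = True  # too sparse to be an interior point
            continue
        v = pts[nbrs] - pts[i]
        ang = np.sort(np.arctan2(v[:, 1], v[:, 0]))
        gaps = np.diff(np.append(ang, ang[0] + 2 * np.pi))  # include wrap-around
        boundary[i] = np.degrees(gaps.max()) > gap_deg
    return boundary

On a uniformly sampled rectangle, only the outer ring of points (and points bordering holes such as openings) exceeds the gap threshold.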
The scale of comparative genomics data frequently overwhelms current data visualization methods on conventional (desktop) displays. This paper describes two types of solution that take advantage of wall-sized high-resolution displays (WHirDs), which have orders of magnitude more display real estate (i.e., pixels) than desktop displays. The first allows users to view detailed graphics of copy number variation (CNV) that were output by existing software. A WHirD's resolution allowed a 10{$\times$} increase in the granularity of bioinformatics output that was feasible for users to visually analyze, and this revealed a pattern that had previously been smoothed out from the underlying data. The second involved interactive visualization software that was innovative because it uses a music score metaphor to lay out CNV data, overcomes a perceptual distortion caused by amplification/deletion thresholds, uses filtering to reduce graphical data overload, and is the first comparative genomics visualization software that is designed to leverage a WHirD's real estate. In a field evaluation, a clinical user discovered a fundamental error in the way their data had been processed, and established confidence in the software by using it to 'find' known genetic patterns in hepatitis C-driven hepatocellular cancer.
@misc{wrro79191,
note = {(c) 2013, IEEE. This is the publisher's draft version of a paper published in Proceedings, 2013 IEEE Symposium on Biological Data Visualization (BioVis). Uploaded in accordance with the publisher's self-archiving policy
},
doi = {10.1109/BioVis.2013.6664351},
booktitle = {2013 IEEE Symposium on Biological Data Visualization (BioVis)},
year = {2013},
journal = {BioVis 2013 - IEEE Symposium on Biological Data Visualization 2013, Proceedings},
pages = {89 -- 96},
publisher = {IEEE},
title = {Leveraging wall-sized high-resolution displays for comparative genomics analyses of copy number variation},
url = {http://dx.doi.org/10.1109/BioVis.2013.6664351},
author = {Ruddle, RA and Fateen, W and Treanor, D and Quirke, P and Sondergeld, P},
abstract = {The scale of comparative genomics data frequently overwhelms current data visualization methods on conventional (desktop) displays. This paper describes two types of solution that take advantage of wall-sized high-resolution displays (WHirDs), which have orders of magnitude more display real estate (i.e., pixels) than desktop displays. The first allows users to view detailed graphics of copy number variation (CNV) that were output by existing software. A WHirD's resolution allowed a 10{$\times$} increase in the granularity of bioinformatics output that was feasible for users to visually analyze, and this revealed a pattern that had previously been smoothed out from the underlying data. The second involved interactive visualization software that was innovative because it uses a music score metaphor to lay out CNV data, overcomes a perceptual distortion caused by amplification/deletion thresholds, uses filtering to reduce graphical data overload, and is the first comparative genomics visualization software that is designed to leverage a WHirD's real estate. In a field evaluation, a clinical user discovered a fundamental error in the way their data had been processed, and established confidence in the software by using it to 'find' known genetic patterns in hepatitis C-driven hepatocellular cancer.},
keywords = {Copy number variation; comparative genomics; wall-sized high-resolution displays; visualization; user interface}
}
In nuclear science, density functional theory (DFT) is a powerful tool to model the complex interactions within the atomic nucleus, and is the primary theoretical approach used by physicists seeking a better understanding of fission. However, DFT simulations result in complex multivariate datasets in which it is difficult to locate the crucial `scission' point at which one nucleus fragments into two, and to identify the precursors to scission. The Joint Contour Net (JCN) has recently been proposed as a new data structure for the topological analysis of multivariate scalar fields, analogous to the contour tree for univariate fields. This paper reports the analysis of DFT simulations using the JCN, the first application of the JCN technique to real data. It makes three contributions to visualization: (i) a set of practical methods for visualizing the JCN, (ii) new insight into the detection of nuclear scission, and (iii) an analysis of aesthetic criteria to drive further work on representing the JCN.
@article{wrro77400,
volume = {18},
month = {December},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {2033 -- 2040},
publisher = {Institute of Electrical and Electronics Engineers},
title = {Visualizing nuclear scission through a multifield extension of topological analysis},
note = {(c) 2012, IEEE. This is an author produced version of a paper published in IEEE Transactions on Visualization and Computer Graphics. Uploaded with permission from the publisher.
},
number = {12},
doi = {10.1109/TVCG.2012.287},
year = {2012},
issn = {1077-2626},
keywords = {topology; scalar fields; multifields},
abstract = {In nuclear science, density functional theory (DFT) is a powerful tool to model the complex interactions within the atomic nucleus, and is the primary theoretical approach used by physicists seeking a better understanding of fission. However, DFT simulations result in complex multivariate datasets in which it is difficult to locate the crucial `scission' point at which one nucleus fragments into two, and to identify the precursors to scission. The Joint Contour Net (JCN) has recently been proposed as a new data structure for the topological analysis of multivariate scalar fields, analogous to the contour tree for univariate fields. This paper reports the analysis of DFT simulations using the JCN, the first application of the JCN technique to real data. It makes three contributions to visualization: (i) a set of practical methods for visualizing the JCN, (ii) new insight into the detection of nuclear scission, and (iii) an analysis of aesthetic criteria to drive further work on representing the JCN.},
author = {Duke, DJ and Carr, H and Knoll, A and Schunck, N and Nam, HA and Staszczak, A},
url = {http://dx.doi.org/10.1109/TVCG.2012.287}
}
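On a regular grid, the flavor of the JCN can be conveyed by quantizing both fields and taking connected components of the joint level sets (the "joint contour slabs" that become graph nodes). The sketch below is a simplified 2D stand-in assuming numpy and scipy; the actual JCN is defined over a simplicial decomposition, so this is not the authors' algorithm.

import numpy as np
from scipy import ndimage

def joint_slabs(f, g, bins=8):
    # Quantize two scalar fields jointly, then label connected slabs.
    qf = np.digitize(f, np.linspace(f.min(), f.max(), bins + 1)[1:-1])
    qg = np.digitize(g, np.linspace(g.min(), g.max(), bins + 1)[1:-1])
    joint = qf * bins + qg  # one id per joint quantization bin
    slabs = np.zeros(joint.shape, dtype=int)
    offset = 0
    for v in np.unique(joint):
        lab, m = ndimage.label(joint == v)  # components within one bin
        slabs[lab > 0] = lab[lab > 0] + offset
        offset += m
    return slabs

def slab_edges(slabs):
    # JCN edges: pairs of slabs that are adjacent on the grid.
    edges = set()
    for a, b in ((slabs[:, :-1], slabs[:, 1:]), (slabs[:-1, :], slabs[1:, :])):
        touching = a != b
        edges |= {tuple(sorted(p)) for p in zip(a[touching].tolist(), b[touching].tolist())}
    return edges

The slabs and edges together give the graph that the paper's visualization methods draw and analyze.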
Histopathologists diagnose cancer and other diseases by using a microscope to examine glass slides containing thin sections of human tissue. Technological advances mean that it is now possible to digitise the slides so that they can be viewed on a computer, promising a number of benefits in terms of both efficiency and safety. Despite this, uptake of digital microscopy for diagnostic work has been slow, and research suggests scepticism and uncertainty amongst histopathologists. In order to design a successful digital microscope, one which fits with the work practices of histopathologists and which they are happy to use within their daily work, we have undertaken a workplace study of a histopathology department. In this paper, we present the findings of that study and discuss the implications of these findings for the design of a digital microscope. The findings emphasise the way in which a diagnosis is built up as particular features on the glass slides are noticed and highlighted and the various information sources that are drawn on in the process of making a diagnosis.
@article{wrro75286,
title = {Diagnosis at the microscope: A workplace study of histopathology},
publisher = {Springer Verlag},
pages = {319 -- 335 },
volume = {14},
month = {November},
journal = {Cognition, Technology and Work},
doi = {10.1007/s10111-011-0182-7},
year = {2012},
note = {{\copyright} 2012, Springer Verlag. This is an author produced version of an article published in Cognition, Technology and Work. Uploaded in accordance with the publisher's self-archiving policy. The final publication is available at www.springerlink.com},
number = {4},
abstract = {Histopathologists diagnose cancer and other diseases by using a microscope to examine glass slides containing thin sections of human tissue. Technological advances mean that it is now possible to digitise the slides so that they can be viewed on a computer, promising a number of benefits in terms of both efficiency and safety. Despite this, uptake of digital microscopy for diagnostic work has been slow, and research suggests scepticism and uncertainty amongst histopathologists. In order to design a successful digital microscope, one which fits with the work practices of histopathologists and which they are happy to use within their daily work, we have undertaken a workplace study of a histopathology department. In this paper, we present the findings of that study and discuss the implications of these findings for the design of a digital microscope. The findings emphasise the way in which a diagnosis is built up as particular features on the glass slides are noticed and highlighted and the various information sources that are drawn on in the process of making a diagnosis.},
keywords = {Healthcare, Histopathology, Digital pathology, Workplace study},
issn = {1435-5558},
url = {http://dx.doi.org/10.1007/s10111-011-0182-7},
author = {Randell, R and Ruddle, RA and Thomas, R and Treanor, D}
}
Aims: To study the current work practice of histopathologists to inform the design of digital microscopy systems. Methods and results: Four gastrointestinal histopathologists were video-recorded as they undertook their routine work. Analysis of the video data shows a range of activities beyond viewing slides involved in reporting a case. There is much overlapping of activities, supported by the 'eyes free' nature of the pathologists' interaction with the microscope. The order and timing of activities varies according to consultant. Conclusions: In order to support the work of pathologists adequately, digital microscopy systems need to provide support for a range of activities beyond viewing slides. Digital microscopy systems should support multitasking, while also providing flexibility so that pathologists can adapt their use of the technology to their own working patterns.
@article{wrro74329,
publisher = {Blackwell publishing},
title = {Working at the microscope: analysis of the activities involved in diagnostic pathology},
journal = {Histopathology},
month = {February},
volume = {60},
pages = {504 -- 510 },
year = {2012},
doi = {10.1111/j.1365-2559.2011.04090.x},
note = {{\copyright} 2012, Blackwell Publishing. This is an author produced version of a paper published in Histopathology. Uploaded in accordance with the publisher's self-archiving policy.
The definitive version is available at www.blackwell-synergy.com},
number = {3},
author = {Randell, R and Ruddle, RA and Quirke, P and Thomas, RG and Treanor, D},
url = {http://dx.doi.org/10.1111/j.1365-2559.2011.04090.x},
issn = {0309-0167},
abstract = {Aims: To study the current work practice of histopathologists to inform the design of digital microscopy systems. Methods and results: Four gastrointestinal histopathologists were video-recorded as they undertook their routine work. Analysis of the video data shows a range of activities beyond viewing slides involved in reporting a case. There is much overlapping of activities, supported by the 'eyes free' nature of the pathologists' interaction with the microscope. The order and timing of activities varies according to consultant. Conclusions: In order to support the work of pathologists adequately, digital microscopy systems need to provide support for a range of activities beyond viewing slides. Digital microscopy systems should support multitasking, while also providing flexibility so that pathologists can adapt their use of the technology to their own working patterns.}
}
On the WWW, users frequently revisit information they have previously seen, but "keeping found things found" is difficult when the information has not been visited frequently or recently, even if a user knows which website contained the information. This paper describes the design of a tool to help users refind information within a given website. The tool encodes data about a user's interest in webpages (measured by dwell time), the frequency and recency of visits, and navigational associations between pages, and presents navigation histories in list- and graph-based forms.
@article{wrro74330,
doi = {10.1007/978-3-642-28997-2\_41},
publisher = {Springer},
year = {2012},
title = {The design of a visual history tool to help users refind information within a website},
volume = {7224},
journal = {Lecture Notes in Computer Science (including subseries Lecture Notes in Artificial Intelligence and Lecture Notes in Bioinformatics)},
pages = {459 -- 462 },
note = {{\copyright} 2012,Springer. This is an author produced version of a paper published in Lecture notes in computer science. Uploaded in accordance with the publisher's self-archiving policy.
},
issn = {0302-9743},
abstract = {On the WWW, users frequently revisit information they have previously seen, but "keeping found things found" is difficult when the information has not been visited frequently or recently, even if a user knows which website contained the information. This paper describes the design of a tool to help users refind information within a given website. The tool encodes data about a user's interest in webpages (measured by dwell time), the frequency and recency of visits, and navigational associations between pages, and presents navigation histories in list- and graph-based forms.},
author = {Do, TV and Ruddle, RA},
url = {http://dx.doi.org/10.1007/978-3-642-28997-2\_41}
}
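A minimal sketch of the per-page record such a tool might keep is given below; the field names and the interest formula (dwell time weighted by log-frequency and exponentially decayed recency) are hypothetical illustrations, not the paper's actual encoding. Navigational associations between pages could similarly be kept as a counted edge set, e.g. a dict mapping (from_url, to_url) pairs to traversal counts.

import math
import time
from dataclasses import dataclass

@dataclass
class PageRecord:
    dwell: float = 0.0       # cumulative seconds spent viewing the page
    visits: int = 0          # how often the page was visited
    last_visit: float = 0.0  # unix timestamp of the most recent visit

def interest(rec, now=None, half_life=7 * 86400):
    # Combine dwell, frequency and recency into one score (hypothetical weights).
    now = time.time() if now is None else now
    recency = 0.5 ** ((now - rec.last_visit) / half_life)
    return rec.dwell * math.log1p(rec.visits) * recency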
Image analysis algorithms are often highly parameterized and much human input is needed to optimize parameter settings. This incurs a time cost of up to several days. We analyze and characterize the conventional parameter optimization process for image analysis and formulate user requirements. With this as input, we propose a change in paradigm by optimizing parameters based on parameter sampling and interactive visual exploration. To save time and reduce memory load, users are only involved in the first step (initialization of sampling) and the last step (visual analysis of output). This helps users to more thoroughly explore the parameter space and produce higher quality results. We describe a custom sampling plug-in we developed for CellProfiler, a popular biomedical image analysis framework. Our main focus is the development of an interactive visualization technique that enables users to analyze the relationships between sampled input parameters and corresponding output. We implemented this in a prototype called Paramorama. It provides users with a visual overview of parameters and their sampled values. User-defined areas of interest are presented in a structured way that includes image-based output and a novel layout algorithm. To find optimal parameter settings, users can tag high- and low-quality results to refine their search. We include two case studies to illustrate the utility of this approach.
@article{wrro74328,
volume = {17},
journal = {IEEE Transactions on Visualization and Computer Graphics},
month = {December},
pages = {2402 -- 2411 },
publisher = {IEEE},
title = {Visualization of parameter space for image analysis},
note = {{\copyright} 2011, IEEE. This is an author produced version of a paper published in IEEE Transactions on Visualization and Computer Graphics. Uploaded in accordance with the publisher's self-archiving policy.
},
number = {12},
doi = {10.1109/TVCG.2011.253},
year = {2011},
author = {Pretorius, AJ and Bray, MA and Carpenter, AE and Ruddle, RA},
url = {http://dx.doi.org/10.1109/TVCG.2011.253},
keywords = {Algorithms, Androstadienes, Cell Line, Cell Nucleus, Chromones, Computer Graphics, Computer Simulation, Humans, Image Processing, Computer-Assisted, Morpholines, Software, User-Computer Interface},
issn = {1077-2626},
abstract = {Image analysis algorithms are often highly parameterized and much human input is needed to optimize parameter settings. This incurs a time cost of up to several days. We analyze and characterize the conventional parameter optimization process for image analysis and formulate user requirements. With this as input, we propose a change in paradigm by optimizing parameters based on parameter sampling and interactive visual exploration. To save time and reduce memory load, users are only involved in the first step (initialization of sampling) and the last step (visual analysis of output). This helps users to more thoroughly explore the parameter space and produce higher quality results. We describe a custom sampling plug-in we developed for CellProfiler, a popular biomedical image analysis framework. Our main focus is the development of an interactive visualization technique that enables users to analyze the relationships between sampled input parameters and corresponding output. We implemented this in a prototype called Paramorama. It provides users with a visual overview of parameters and their sampled values. User-defined areas of interest are presented in a structured way that includes image-based output and a novel layout algorithm. To find optimal parameter settings, users can tag high- and low-quality results to refine their search. We include two case studies to illustrate the utility of this approach.}
}
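The sampling step can be as simple as enumerating the Cartesian product of user-chosen values for each parameter, as sketched below. The parameter names and the run_pipeline hook are hypothetical; CellProfiler's real plug-in interface differs.

import itertools

params = {
    "threshold": [0.2, 0.4, 0.6],  # hypothetical parameter value lists
    "radius": [1, 2, 4],
    "smoothing": [0, 1],
}

for run_id, values in enumerate(itertools.product(*params.values())):
    settings = dict(zip(params.keys(), values))
    # run_pipeline(settings, out_dir=f"run_{run_id:04d}")  # hypothetical hook
    print(run_id, settings)

Each of the 18 settings dictionaries would drive one pipeline run, and the resulting images become the output that Paramorama-style tools let users explore visually.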
This study investigated the effect of body-based information (proprioception, etc.) when participants navigated large-scale virtual marketplaces that were either small (Experiment 1) or large in extent (Experiment 2). Extent refers to the size of an environment, whereas scale refers to whether people have to travel through an environment to see the detail necessary for navigation. Each participant was provided with full body-based information (walking through the virtual marketplaces in a large tracking hall or on an omnidirectional treadmill), just the translational component of body-based information (walking on a linear treadmill, but turning with a joystick), just the rotational component (physically turning but using a joystick to translate) or no body-based information (joysticks to translate and rotate). In large and small environments translational body-based information significantly improved the accuracy of participants' cognitive maps, measured using estimates of direction and relative straight line distance but, on its own, rotational body-based information had no effect. In environments of small extent, full body-based information also improved participants' navigational performance. The experiments show that locomotion devices such as linear treadmills would bring substantial benefits to virtual environment applications where large spaces are navigated, and theories of human navigation need to reconsider the contribution made by body-based information, and distinguish between environmental scale and extent.
@article{wrro74327,
doi = {10.1145/1970378.1970384},
year = {2011},
note = {{\copyright} ACM, 2011. This is the author's version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in ACM Transactions on Computer - Human Interaction, VOL 18, ISS 2,(2011) http://doi.acm.org/10.1145/1970378.1970384},
number = {2},
publisher = {Association for Computing Machinery},
title = {Walking improves your cognitive map in environments that are large-scale and large in extent},
volume = {18},
journal = {ACM Transactions on Computer - Human Interaction},
month = {June},
keywords = {virtual reality, navigation, locomotion, cognitive map, virtual environments, path-integration, spatial knowledge, optic flow, locomotion, navigation, distance, real, landmarks, senses},
issn = {1073-0516},
abstract = {This study investigated the effect of body-based information (proprioception, etc.) when participants navigated large-scale virtual marketplaces that were either small (Experiment 1) or large in extent (Experiment 2). Extent refers to the size of an environment, whereas scale refers to whether people have to travel through an environment to see the detail necessary for navigation. Each participant was provided with full body-based information (walking through the virtual marketplaces in a large tracking hall or on an omnidirectional treadmill), just the translational component of body-based information (walking on a linear treadmill, but turning with a joystick), just the rotational component (physically turning but using a joystick to translate) or no body-based information (joysticks to translate and rotate). In large and small environments translational body-based information significantly improved the accuracy of participants' cognitive maps, measured using estimates of direction and relative straight line distance but, on its own, rotational body-based information had no effect. In environments of small extent, full body-based information also improved participants' navigational performance. The experiments show that locomotion devices such as linear treadmills would bring substantial benefits to virtual environment applications where large spaces are navigated, and theories of human navigation need to reconsider the contribution made by body-based information, and distinguish between environmental scale and extent.},
author = {Ruddle, RA and Volkova, E and Bulthoff, HH},
url = {http://dx.doi.org/10.1145/1970378.1970384}
}
Two experiments investigated the effects of landmarks and body-based information on route knowledge. Participants made four out-and-back journeys along a route, guided only on the first outward trip and with feedback every time an error was made. Experiment 1 used 3-D virtual environments (VEs) with a desktop monitor display, and participants were provided with no supplementary landmarks, only global landmarks, only local landmarks, or both global and local landmarks. Local landmarks significantly reduced the number of errors that participants made, but global landmarks did not. Experiment 2 used a head-mounted display; here, participants who physically walked through the VE (translational and rotational body-based information) made 36\% fewer errors than did participants who traveled by physically turning but changing position using a joystick. Overall, the experiments showed that participants were less sure of where to turn than which way, and journey direction interacted with sensory information to affect the number and types of errors participants made.
@article{wrro74325,
pages = {686 -- 699 },
journal = {Memory and Cognition},
month = {May},
volume = {39},
title = {The effect of landmark and body-based sensory information on route knowledge},
publisher = {Psychonomic Society},
note = {{\copyright} 2011, Psychonomic Society. This is an author produced version of a paper published in Memory and Cognition. Uploaded in accordance with the publisher's self-archiving policy.
},
number = {4},
year = {2011},
doi = {10.3758/s13421-010-0054-z},
url = {http://dx.doi.org/10.3758/s13421-010-0054-z},
author = {Ruddle, RA and Volkova, E and Mohler, B and B{\"u}lthoff, HH},
abstract = {Two experiments investigated the effects of landmarks and body-based information on route knowledge. Participants made four out-and-back journeys along a route, guided only on the first outward trip and with feedback every time an error was made. Experiment 1 used 3-D virtual environments (VEs) with a desktop monitor display, and participants were provided with no supplementary landmarks, only global landmarks, only local landmarks, or both global and local landmarks. Local landmarks significantly reduced the number of errors that participants made, but global landmarks did not. Experiment 2 used a head-mounted display; here, participants who physically walked through the VE (translational and rotational body-based information) made 36\% fewer errors than did participants who traveled by physically turning but changing position using a joystick. Overall, the experiments showed that participants were less sure of where to turn than which way, and journey direction interacted with sensory information to affect the number and types of errors participants made.},
issn = {0090-502X},
keywords = {Adult, Cues, Female, Humans, Kinesthesis, Locomotion, Male, Mental Recall, Orientation, Pattern Recognition, Visual, Proprioception, Space Perception, User-Computer Interface, Young Adult}
}
Information spaces such as the WWW are the most challenging type of space that many people navigate during everyday life. Unlike the real world, there are no effective maps of information spaces, so people are forced to rely on search engines which are only suited to some types of retrieval task. This paper describes a new method for creating maps of information spaces, called INSPIRE. The INSPIRE engine is a tree drawing algorithm that uses a city metaphor, comprised of streets and buildings, and generates maps entirely automatically from webcrawl data. A technical evaluation was carried out using data from 112 universities, which had up to 485,775 pages on their websites. Although they take longer to compute than radial layouts (e.g., the Bubble Tree), INSPIRE maps are much more compact. INSPIRE maps also have desirable aesthetic properties of being orthogonal, preserving symmetry between identical subtrees and being planar.
@article{wrro74324,
pages = {273 -- 279 },
note = {{\copyright} 2010, IEEE. This is an author produced version of a paper published in Information Visualisation (IV), 2010 14th International Conference. Uploaded in accordance with the publisher's self-archiving policy.
Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.
},
journal = {Proceedings of the International Conference on Information Visualisation},
title = {INSPIRE: A new method of mapping information spaces},
year = {2010},
doi = {10.1109/IV.2010.48},
publisher = {IEEE},
abstract = {Information spaces such as the WWW are the most challenging type of space that many people navigate during everyday life. Unlike the real world, there are no effective maps of information spaces, so people are forced to rely on search engines which are only suited to some types of retrieval task. This paper describes a new method for creating maps of information spaces, called INSPIRE. The INSPIRE engine is a tree drawing algorithm that uses a city metaphor, comprised of streets and buildings, and generates maps entirely automatically from webcrawl data. A technical evaluation was carried out using data from 112 universities, which had up to 485,775 pages on their websites. Although they take longer to compute than radial layouts (e.g., the Bubble Tree), INSPIRE maps are much more compact. INSPIRE maps also have desirable aesthetic properties of being orthogonal, preserving symmetry between identical subtrees and being planar.},
issn = {1093-9547},
url = {http://dx.doi.org/10.1109/IV.2010.48},
author = {Ruddle, RA}
}
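As a toy illustration of a city-metaphor tree layout, the sketch below places each node as a "building" and runs its children along a "street", with subtrees branching into side streets at alternating orientations. It is only loosely inspired by the description above and is not the published INSPIRE algorithm.

def span(tree, node):
    # Number of leaves under a node, used to reserve street length.
    kids = tree.get(node, [])
    return max(1, sum(span(tree, k) for k in kids))

def layout(tree, node, x=0.0, y=0.0, horizontal=True, unit=1.0):
    # Place nodes on a grid; children run along a street off their parent.
    pos = {node: (x, y)}
    offset = unit
    for child in tree.get(node, []):
        cx, cy = (x + offset, y + unit) if horizontal else (x + unit, y + offset)
        pos.update(layout(tree, child, cx, cy, not horizontal, unit))
        offset += unit * span(tree, child)
    return pos

tree = {"home": ["about", "research"], "research": ["papers", "projects"]}
print(layout(tree, "home"))

The resulting placement is orthogonal by construction, which hints at why the aesthetic properties noted above fall naturally out of a street-and-building scheme.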
Virtual slides could replace the conventional microscope. However, it can take 60\% longer to make a diagnosis with a virtual slide, due to the small display size and inadequate user interface of current systems. The aim was to create and test a virtual reality (VR) microscope using a Powerwall (a high-resolution array of 28 computer screens) for viewing virtual slides more efficiently.
@article{wrro74323,
doi = {10.1111/j.1365-2559.2009.03389.x},
year = {2009},
note = {{\copyright} 2009, Blackwell Publishing. This is an author produced version of a paper: Treanor, D, Jordan-Owers, N, Hodrien, J, Wood, J, Quirke, P and Ruddle, RA (2009) Virtual reality Powerwall versus conventional microscope for viewing pathology slides: an experimental comparison. Histopathology, 55 (3). 294 - 300, which has been published in final form at: http://dx.doi.org/10.1111/j.1365-2559.2009.03389.x},
number = {3},
title = {Virtual reality Powerwall versus conventional microscope for viewing pathology slides: an experimental comparison},
publisher = {Wiley},
pages = {294--300},
volume = {55},
journal = {Histopathology},
month = {September},
issn = {0309-0167},
keywords = {Carcinoma, Basal Cell; Carcinoma, Squamous Cell; Diagnosis, Differential; Diagnostic Techniques and Procedures; Equipment Design; Humans; Image Processing, Computer-Assisted; Lymph Nodes; Microscopy; Pathology, Surgical; Skin Neoplasms; Tissue Array Analysis; User-Computer Interface},
abstract = {Virtual slides could replace the conventional microscope. However, it can take 60\% longer to make a diagnosis with a virtual slide, due to the small display size and inadequate user interface of current systems. The aim was to create and test a virtual reality (VR) microscope using a Powerwall (a high-resolution array of 28 computer screens) for viewing virtual slides more efficiently.},
author = {Treanor, D and Jordan-Owers, N and Hodrien, J and Wood, J and Quirke, P and Ruddle, RA},
url = {http://dx.doi.org/10.1111/j.1365-2559.2009.03389.x}
}
Mobile group dynamics (MGDs) assist synchronous working in collaborative virtual environments (CVEs), and virtual time (VT) extends the benefits to asynchronous working. The present paper describes the implementation of MGDs (teleporting, awareness and multiple views) and VT (the utterances of 23 previous users were embedded in a CVE as conversation tags), and their evaluation using an urban planning task. Compared with previous research using the same scenario, the new MGD techniques produced substantial increases in the amount that, and distance over which, participants communicated. With VT participants chose to listen to a quarter of the conversations of their predecessors while performing the task. The embedded VT conversations led to a reduction in the rate at which participants traveled around, but an increase in live communication that took place. Taken together, the studies show how CVE interfaces can be improved for synchronous and asynchronous collaborations, and highlight possibilities for future research.
@article{wrro8630,
pages = {130--138},
month = {April},
journal = {Computers \& Graphics},
volume = {33},
title = {Using mobile group dynamics and virtual time to improve teamwork in large-scale collaborative virtual environments},
publisher = {Elsevier Ltd},
number = {2},
note = {{\copyright} 2009 Elsevier Ltd. This is an author produced version of a paper published in Computers \& Graphics. Uploaded in accordance with the publisher's self-archiving policy.
},
year = {2009},
doi = {10.1016/j.cag.2009.01.001},
keywords = {Collaborative virtual environments, virtual reality, asynchronous collaboration, group dynamics},
issn = {0097-8493},
abstract = {Mobile group dynamics (MGDs) assist synchronous working in collaborative virtual environments (CVEs), and virtual time (VT) extends the benefits to asynchronous working. The present paper describes the implementation of MGDs (teleporting, awareness and multiple views) and VT (the utterances of 23 previous users were embedded in a CVE as conversation tags), and their evaluation using an urban planning task. Compared with previous research using the same scenario, the new MGD techniques produced substantial increases in the amount that, and distance over which, participants communicated. With VT participants chose to listen to a quarter of the conversations of their predecessors while performing the task. The embedded VT conversations led to a reduction in the rate at which participants traveled around, but an increase in live communication that took place. Taken together, the studies show how CVE interfaces can be improved for synchronous and asynchronous collaborations, and highlight possibilities for future research.
},
author = {Dodds, T. J. and Ruddle, R. A.},
url = {http://dx.doi.org/10.1016/j.cag.2009.01.001}
}
Navigation is the most common interactive task performed in three-dimensional virtual environments (VEs), but it is also a task that users often find difficult. We investigated how body-based information about the translational and rotational components of movement helped participants to perform a navigational search task (finding targets hidden inside boxes in a room-sized space). When participants physically walked around the VE while viewing it on a head-mounted display (HMD), they then performed 90% of trials perfectly, comparable to participants who had performed an equivalent task in the real world during a previous study. By contrast, participants performed less than 50% of trials perfectly if they used a tethered HMD (move by physically turning but pressing a button to translate) or a desktop display (no body-based information). This is the most complex navigational task in which a real-world level of performance has been achieved in a VE. Behavioral data indicates that both translational and rotational body-based information are required to accurately update one's position during navigation, and participants who walked tended to avoid obstacles, even though collision detection was not implemented and feedback not provided. A walking interface would bring immediate benefits to a number of VE applications.
@article{wrro8632,
note = {{\copyright} 2009 Association for Computing Machinery. This is the author's version of the work. It is posted here by permission of ACM for your personal use. Not for redistribution. The definitive version was published in ACM Transactions on Computer-Human Interaction, 16 (1). 5:1-5:18.
},
number = {1},
year = {2009},
doi = {10.1145/1502800.1502805},
pages = {5:1--5:18},
journal = {ACM Transactions on Computer-Human Interaction},
month = {April},
volume = {16},
title = {The benefits of using a walking interface to navigate virtual environments},
publisher = {Association for Computing Machinery},
issn = {1073-0516},
keywords = {virtual reality, navigation, locomotion, visual fidelity},
abstract = {Navigation is the most common interactive task performed in three-dimensional virtual environments (VEs), but it is also a task that users often find difficult. We investigated how body-based information about the translational and rotational components of movement helped participants to perform a navigational search task (finding targets hidden inside boxes in a room-sized space). When participants physically walked around the VE while viewing it on a head-mounted display (HMD), they then performed 90\% of trials perfectly, comparable to participants who had performed an equivalent task in the real world during a previous study. By contrast, participants performed less than 50\% of trials perfectly if they used a tethered HMD (move by physically turning but pressing a button to translate) or a desktop display (no body-based information). This is the most complex navigational task in which a real-world level of performance has been achieved in a VE. Behavioral data indicates that both translational and rotational body-based information are required to accurately update one's position during navigation, and participants who walked tended to avoid obstacles, even though collision detection was not implemented and feedback not provided. A walking interface would bring immediate benefits to a number of VE applications.
},
author = {Ruddle, R. A. and Lessels, S.},
url = {http://doi.acm.org/10.1145/1502800.1502805}
}
In a lifetime, an "average" person will visit approximately a million webpages. Sometimes a person finds they want to return to a given page at some future date but, having no recollection of where it was (URL, host, etc.), has to look for it again from scratch. This paper assesses how a person's memory could be assisted by the presentation of a "map" of their web browsing activity. Three map organisation approaches were investigated: (i) time-based, (ii) place-based, and (iii) topic-based. Time-based organisation is the least suitable, because the temporal specificity of human memory is generally poor. Place-based approaches lack scalability, and are not helped by the fact that there is little repetition in the paths a person follows between places. Topic-based organisation is more promising, with topics derived from both the web content that is accessed and the search queries that are executed, which provide snapshots into a person's cognitive processes by explicitly capturing the terminology of "what" they were looking for at that moment in time. In terms of presentation, a map that combines aspects of network connectivity with a space filling approach is likely to be most effective.
@inproceedings{wrro8631,
booktitle = {WebSci'09: Society On-Line},
publisher = {Web Science Research Initiative},
year = {2009},
title = {Finding information again using an individual's web history},
journal = {Proceedings of the WebSci '09},
month = {March},
author = {Ruddle, R. A.},
url = {http://journal.webscience.org/168/},
keywords = {Navigation; Web history; Information retrieval},
abstract = {In a lifetime, an "average" person will visit approximately a million webpages. Sometimes a person finds they want to return to a given page at some future date but, having no recollection of where it was (URL, host, etc.), has to look for it again from scratch. This paper assesses how a person's memory could be assisted by the presentation of a "map" of their web browsing activity. Three map organisation approaches were investigated: (i) time-based, (ii) place-based, and (iii) topic-based. Time-based organisation is the least suitable, because the temporal specificity of human memory is generally poor. Place-based approaches lack scalability, and are not helped by the fact that there is little repetition in the paths a person follows between places. Topic-based organisation is more promising, with topics derived from both the web content that is accessed and the search queries that are executed, which provide snapshots into a person's cognitive processes by explicitly capturing the terminology of "what" they were looking for at that moment in time. In terms of presentation, a map that combines aspects of network connectivity with a space filling approach is likely to be most effective.}
}
A new method for generating trails from a person's movement through a virtual environment (VE) is described. The method is entirely automatic (no user input is needed), and uses string-matching to identify similar sequences of movement and derive the person's primary trail. The method was evaluated in a virtual building, and generated trails that substantially reduced the distance participants traveled when they searched for target objects in the building 5-8 weeks after a set of familiarization sessions. Only a modest amount of data (typically five traversals of the building) was required to generate trails that were both effective and stable, and the method was not affected by the order in which objects were visited. The trail generation method models an environment as a graph and, therefore, may be applied to aiding navigation in the real world and information spaces, as well as VEs.
@article{wrro4953,
month = {December},
journal = {Presence : Teleoperators and Virtual Environments},
volume = {17},
pages = {562--574},
publisher = {MIT Press},
title = {Generating trails automatically, to aid navigation when you revisit an environment},
note = {{\copyright} 2008 by the Massachusetts Institute of Technology. This is an author produced version of a paper published in Presence : Teleoperators and Virtual Environments. Uploaded in accordance with the publisher's self-archiving policy.},
number = {6},
year = {2008},
doi = {10.1162/pres.17.6.562},
url = {http://dx.doi.org/10.1162/pres.17.6.562},
author = {Ruddle, R. A.},
abstract = {A new method for generating trails from a person's movement through a virtual environment (VE) is described. The method is entirely automatic (no user input is needed), and uses string-matching to identify similar sequences of movement and derive the person's primary trail. The method was evaluated in a virtual building, and generated trails that substantially reduced the distance participants traveled when they searched for target objects in the building 5-8 weeks after a set of familiarization sessions. Only a modest amount of data (typically five traversals of the building) was required to generate trails that were both effective and stable, and the method was not affected by the order in which objects were visited. The trail generation method models an environment as a graph and, therefore, may be applied to aiding navigation in the real world and information spaces, as well as VEs.},
issn = {1531-3263}
}
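The heart of the method, finding movement sequences that repeat across visits, can be approximated by counting node n-grams over the graph-node sequence of each traversal. This is a simplified stand-in assuming each session is a list of node identifiers; the paper's string-matching procedure is more involved.

from collections import Counter

def common_trails(sessions, min_len=3):
    # Return node subsequences that occur in more than one session.
    counts = Counter()
    for seq in sessions:
        seen = set()  # count each subsequence at most once per session
        for n in range(min_len, len(seq) + 1):
            for i in range(len(seq) - n + 1):
                gram = tuple(seq[i:i + n])
                if gram not in seen:
                    seen.add(gram)
                    counts[gram] += 1
    return [g for g, c in counts.items() if c > 1]

sessions = [list("ABCDEF"), list("XABCDY"), list("ABCF")]
print(max(common_trails(sessions), key=len))  # ('A', 'B', 'C', 'D')

The longest subsequence shared by several traversals is a natural candidate for the primary trail.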
We have developed techniques called Mobile Group Dynamics (MGDs), which help groups of people to work together while they travel around large-scale virtual environments. MGDs explicitly showed the groups that people had formed themselves into, and helped people move around together and communicate over extended distances. The techniques were evaluated in the context of an urban planning application, by providing one batch of participants with MGDs and another with an interface based on conventional collaborative virtual environments (CVEs). Participants with MGDs spent nearly twice as much time in close proximity (within 10m of their nearest neighbor), communicated seven times more than participants with a conventional interface, and exhibited real-world patterns of behavior such as staying together over an extended period of time and regrouping after periods of separation. The study has implications for CVE designers, because it shows how MGDs improve groupwork in CVEs.
@misc{wrro4948,
note = {{\copyright} Copyright 2008 IEEE. Personal use of this material is permitted. However, permission to reprint/republish this material for advertising or promotional purposes or for creating new collective works for resale or redistribution to servers or lists, or to reuse any copyrighted component of this work in other works must be obtained from the IEEE. },
year = {2008},
doi = {10.1109/VR.2008.4480751},
booktitle = {IEEE Virtual Reality 2008},
journal = {Proceedings of IEEE Virtual Reality},
pages = {59--66},
publisher = {IEEE},
title = {Mobile group dynamics in large-scale collaborative virtual environments},
isbn = {978-1-4244-1971-5},
url = {http://dx.doi.org/10.1109/VR.2008.4480751},
author = {Dodds, T. J. and Ruddle, R. A.},
abstract = {We have developed techniques called Mobile Group Dynamics (MGDs), which help groups of people to work together while they travel around large-scale virtual environments. MGDs explicitly showed the groups that people had formed themselves into, and helped people move around together and communicate over extended distances. The techniques were evaluated in the context of an urban planning application, by providing one batch of participants with MGDs and another with an interface based on conventional collaborative virtual environments (CVEs). Participants with MGDs spent nearly twice as much time in close proximity (within 10m of their nearest neighbor), communicated seven times more than participants with a conventional interface, and exhibited real-world patterns of behavior such as staying together over an extended period of time and regrouping after periods of separation. The study has implications for CVE designers, because it shows how MGDs improve groupwork in CVEs.},
keywords = {Collaborative interaction, experimental methods, distributed VR, usability}
}
Mobile Group Dynamics (MGDs) are a suite of techniques that help people work together in large-scale collaborative virtual environments (CVEs). The present paper describes the implementation and evaluation of three additional MGDs techniques (teleporting, awareness and multiple views) which, when combined, produced a 4 times increase in the amount that participants communicated in a CVE and also significantly increased the extent to which participants communicated over extended distances in the CVE. The MGDs were evaluated using an urban planning scenario with groups of either seven (teleporting + awareness) or eight (teleporting + awareness + multiple views) participants. The study has implications for CVE designers, because it provides quantitative and qualitative data about how teleporting, awareness and multiple views improve groupwork in CVEs. Categories and Subject Descriptors (according to ACM CCS): C.2.4 [Computer-Communication Networks]: Distributed Systems - Distributed applications; H.1.2 [Models and Principles]: User/Machine Systems - Human factors; Software psychology; H.5.1 [Information Interfaces and Presentation]: Multimedia Information Systems - Artificial, augmented and virtual realities; H.5.3 [Information Interfaces and Presentation]: Group and Organization Interfaces - Collaborative computing; Computer-supported cooperative work; Synchronous interaction; I.3.7 [Computer Graphics]: Three Dimensional Graphics and Realism - Virtual Reality
@misc{wrro4949,
publisher = {Eurographics Association},
title = {Using teleporting, awareness and multiple views to improve teamwork in collaborative virtual environments},
journal = {Virtual Environments 2008},
pages = {81--88},
booktitle = {14th Eurographics Symposium on Virtual Environments},
editor = {B. Mohler and R. van Liere},
year = {2008},
note = {Copyright {\copyright} 2008 by the Eurographics Association. This is an author produced version of the paper. The definitive version is available at diglib.eg.org. Uploaded in accordance with the publisher's self-archiving policy.},
abstract = {Mobile Group Dynamics (MGDs) are a suite of techniques that help people work together in large-scale collaborative virtual environments (CVEs). The present paper describes the implementation and evaluation of three additional MGDs techniques (teleporting, awareness and multiple views) which, when combined, produced a 4 times increase in the amount that participants communicated in a CVE and also significantly increased the extent to which participants communicated over extended distances in the CVE. The MGDs were evaluated using an urban planning scenario with groups of either seven (teleporting + awareness) or eight (teleporting + awareness + multiple views) participants. The study has implications for CVE designers, because it provides quantitative and qualitative data about how teleporting, awareness and multiple views improve groupwork in CVEs. Categories and Subject Descriptors (according to ACM CCS): C.2.4 [Computer-Communication Networks]: Distributed Systems - Distributed applications; H.1.2 [Models and Principles]: User/Machine Systems - Human factors; Software psychology; H.5.1 [Information Interfaces and Presentation]: Multimedia Information Systems - Artificial, augmented and virtual realities; H.5.3 [Information Interfaces and Presentation]: Group and Organization Interfaces - Collaborative computing; Computer-supported cooperative work; Synchronous interaction; I.3.7 [Computer Graphics]: Three Dimensional Graphics and Realism - Virtual Reality},
author = {Dodds, T. J. and Ruddle, R. A.},
isbn = {978-3-905674-06-4},
url = {http://www.eg.org/EG/DL/WS/EGVE/EGVE08/}
}
Physically large display walls can now be constructed using off-the-shelf computer hardware. The high resolution of these displays (e.g., 50 million pixels) means that a large quantity of data can be presented to users, so the displays are well suited to visualization applications. However, current methods of interacting with display walls are somewhat time consuming. We have analyzed how users solve real visualization problems using three desktop applications (XmdvTool, Iris Explorer and ArcView), and used a new taxonomy to classify users' actions and illustrate the deficiencies of current display wall interaction methods. Following this we designed a novel method for interacting with display walls, which aims to let users interact as quickly as when a visualization application is used on a desktop system. Informal feedback gathered from our working prototype shows that interaction is both fast and fluid.
@misc{wrro4950,
editor = {I.S. Lim and W. Tang},
doi = {10.2312/LocalChapterEvents/TPCG/TPCG08/075-082},
booktitle = {The 6th Theory and Practice of Computer Graphics Conference (TP.CG.08)},
year = {2008},
note = {Copyright {\copyright} 2008 by the Eurographics Association. This is an author produced version of the paper. The definitive version is available at diglib.eg.org. Uploaded in accordance with the publisher's self-archiving policy.},
title = {A new method for interacting with multi-window applications on large, high resolution displays},
publisher = {Eurographics},
pages = {75--82},
journal = {Theory and Practice of Computer Graphics. Proceedings.},
author = {Rooney, C. and Ruddle, R. A.},
isbn = {978-3-905673-67-8},
url = {http://dx.doi.org/10.2312/LocalChapterEvents/TPCG/TPCG08/075-082},
abstract = {Physically large display walls can now be constructed using off-the-shelf computer hardware. The high resolution of these displays (e.g., 50 million pixels) means that a large quantity of data can be presented to users, so the displays are well suited to visualization applications. However, current methods of interacting with display walls are somewhat time consuming. We have analyzed how users solve real visualization problems using three desktop applications (XmdvTool, Iris Explorer and ArcView), and used a new taxonomy to classify users' actions and illustrate the deficiencies of current display wall interaction methods. Following this we designed a novel method for interacting with display walls, which aims to let users interact as quickly as when a visualization application is used on a desktop system. Informal feedback gathered from our working prototype shows that interaction is both fast and fluid.}
}
Three levels of virtual environment (VE) metric are proposed, based on: (1) users' task performance (time taken, distance traveled and number of errors made), (2) physical behavior (locomotion, looking around, and time and error classification), and (3) decision making (i.e., cognitive) rationale (think aloud, interview and questionnaire). Examples of the use of these metrics are drawn from a detailed review of research into VE wayfinding. A case study from research into the fidelity that is required for efficient VE wayfinding is presented, showing the unsuitability in some circumstances of common metrics of task performance such as time and distance, and the benefits to be gained by making fine-grained analyses of users' behavior. Taken as a whole, the article highlights the range of techniques that have been successfully used to evaluate wayfinding and explains in detail how some of these techniques may be applied.
@article{wrro4959,
publisher = {MIT Press},
title = {Three levels of metric for evaluating wayfinding},
volume = {15},
journal = {Presence: Teleoperators and Virtual Environments},
month = {December},
pages = {637--654},
doi = {10.1162/pres.15.6.637},
year = {2006},
note = {Copyright {\copyright} 2006 by the Massachusetts Institute of Technology. This is an author produced version of a paper published in Presence : Teleoperators and Virtual Environments. Uploaded in accordance with the publisher's self-archiving policy.},
number = {6},
issn = {1531-3263},
abstract = {Three levels of virtual environment (VE) metric are proposed, based on: (1) users' task performance (time taken, distance traveled and number of errors made), (2) physical behavior (locomotion, looking around, and time and error classification), and (3) decision making (i.e., cognitive) rationale (think aloud, interview and questionnaire). Examples of the use of these metrics are drawn from a detailed review of research into VE wayfinding. A case study from research into the fidelity that is required for efficient VE wayfinding is presented, showing the unsuitability in some circumstances of common metrics of task performance such as time and distance, and the benefits to be gained by making fine-grained analyses of users' behavior. Taken as a whole, the article highlights the range of techniques that have been successfully used to evaluate wayfinding and explains in detail how some of these techniques may be applied.},
author = {Lessels, S. and Ruddle, R. A.},
url = {http://dx.doi.org/10.1162/pres.15.6.637}
}
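The first level of metric is straightforward to compute from a logged trajectory; the sketch below derives time taken and distance traveled from timestamped position samples, assuming a log of (t, x, y) tuples. Error counting is task-specific and is left as a parameter.

import math

def level1_metrics(log, errors=0):
    # Task-performance metrics from (t, x, y) samples.
    distance = sum(math.dist(a[1:], b[1:]) for a, b in zip(log, log[1:]))
    return {
        "time_taken": log[-1][0] - log[0][0],
        "distance_traveled": distance,
        "errors": errors,
    }

log = [(0.0, 0.0, 0.0), (1.0, 3.0, 4.0), (2.0, 3.0, 8.0)]
print(level1_metrics(log))  # time 2.0, distance 9.0

The second and third levels (behavior classification, think-aloud and interview coding) depend on human judgment and do not reduce to a few lines of code.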
During navigation, humans combine visual information from their surroundings with body-based information from the translational and rotational components of movement. Theories of navigation focus on the role of visual and rotational body-based information, even though experimental evidence shows they are not sufficient for complex spatial tasks. To investigate the contribution of all three sources of information, we asked participants to search a computer generated "virtual" room for targets. Participants were provided with either only visual information, or visual supplemented with body-based information for all movement (walk group) or rotational movement (rotate group). The walk group performed the task with near-perfect efficiency, irrespective of whether a rich or impoverished visual scene was provided. The visual-only and rotate groups were significantly less efficient, and frequently searched parts of the room at least twice. This suggests full physical movement plays a critical role in navigational search, but only moderate visual detail is required.
@article{wrro4958,
publisher = {Blackwell Science},
title = {For efficient navigational search, humans require full physical movement but not a rich visual scene},
journal = {Psychological Science},
month = {June},
volume = {17},
pages = {460--465},
year = {2006},
doi = {10.1111/j.1467-9280.2006.01728.x},
number = {6},
note = {{\copyright} 2006 American Psychological Society. This is an author produced version of a paper published in Psychological Science. Uploaded in accordance with the publisher's self-archiving policy.},
abstract = {During navigation, humans combine visual information from their surroundings with body-based information from the translational and rotational components of movement. Theories of navigation focus on the role of visual and rotational body-based information, even though experimental evidence shows they are not sufficient for complex spatial tasks. To investigate the contribution of all three sources of information, we asked participants to search a computer generated "virtual" room for targets. Participants were provided with either only visual information, or visual supplemented with body-based information for all movement (walk group) or rotational movement (rotate group). The walk group performed the task with near-perfect efficiency, irrespective of whether a rich or impoverished visual scene was provided. The visual-only and rotate groups were significantly less efficient, and frequently searched parts of the room at least twice. This suggests full physical movement plays a critical role in navigational search, but only moderate visual detail is required.},
issn = {1467-9280},
url = {http://dx.doi.org/10.1111/j.1467-9280.2006.01728.x},
author = {Ruddle, R. A. and Lessels, S.}
}
A method of using string-matching to analyze hypertext navigation was developed, and evaluated using two weeks of website logfile data. The method is divided into phases that use: (i) exact string-matching to calculate subsequences of links that were repeated in different navigation sessions (common trails through the website), and then (ii) inexact matching to find other similar sessions (a community of users with a similar interest). The evaluation showed how subsequences could be used to understand the information pathways users chose to follow within a website, and that exact and inexact matching provided complementary ways of identifying information that may have been of interest to a whole community of users, but which was only found by a minority. This illustrates how string-matching could be used to improve the structure of hypertext collections.
@misc{wrro4957,
pages = {49--52},
journal = {Proceedings of the 17th ACM Conference on Hypertext and Hypermedia},
address = {New York, NY},
title = {Using string-matching to analyze hypertext navigation},
publisher = {ACM},
note = {Copyright {\copyright} 2006 by the Association for Computing Machinery, Inc. (ACM). This is an author produced version of a paper published in Proceedings of the 17th ACM Conference on Hypertext and Hypermedia. Uploaded in accordance with the publisher's self-archiving policy.},
booktitle = {Seventeenth Conference on Hypertext and Hypermedia},
doi = {10.1145/1149941.1149952},
year = {2006},
keywords = {Navigation, String-matching, Analysis.},
abstract = {A method of using string-matching to analyze hypertext navigation was developed, and evaluated using two weeks of website logfile data. The method is divided into phases that use: (i) exact string-matching to calculate subsequences of links that were repeated in different navigation sessions (common trails through the website), and then (ii) inexact matching to find other similar sessions (a community of users with a similar interest). The evaluation showed how subsequences could be used to understand the information pathways users chose to follow within a website, and that exact and inexact matching provided complementary ways of identifying information that may have been of interest to a whole community of users, but which was only found by a minority. This illustrates how string-matching could be used to improve the structure of hypertext collections.},
author = {Ruddle, R. A.},
isbn = {1-59593-417-0},
url = {http://dx.doi.org/10.1145/1149941.1149952}
}
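The two-phase method described in the entry above lends itself to a compact illustration. The following Python sketch is illustrative only, not the paper's implementation: the session data, the subsequence length, and the similarity threshold are all assumed for the example, and the standard library's SequenceMatcher stands in for whatever inexact matcher was actually used.

from collections import Counter
from difflib import SequenceMatcher  # standard-library inexact sequence matcher

def repeated_subsequences(sessions, length=3):
    """Phase (i): exact matching -- count link subsequences (n-grams) that
    occur in more than one session; these are candidate common trails."""
    counts = Counter()
    for session in sessions:
        grams = {tuple(session[i:i + length])
                 for i in range(len(session) - length + 1)}
        counts.update(grams)  # each subsequence counted once per session
    return {gram: n for gram, n in counts.items() if n > 1}

def similar_sessions(target, sessions, threshold=0.6):
    """Phase (ii): inexact matching -- sessions whose similarity to the
    target exceeds a threshold form a community with a shared interest."""
    return [s for s in sessions
            if SequenceMatcher(None, target, s).ratio() >= threshold]

# Hypothetical logfile sessions: each is an ordered list of page identifiers.
sessions = [["home", "news", "staff", "pubs"],
            ["home", "news", "staff", "contact"],
            ["home", "search", "pubs"]]
print(repeated_subsequences(sessions))                          # common trail
print(similar_sessions(["home", "news", "staff", "pubs"], sessions))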
Two experiments investigated participants' ability to search for targets in a cluttered small-scale space. The first experiment was conducted in the real world with two field of view conditions (full vs. restricted), and participants found the task trivial to perform in both. The second experiment used the same search task but was conducted in a desktop virtual environment (VE), and investigated two movement interfaces and two visual scene conditions. Participants restricted to forward only movement performed the search task quicker and more efficiently (visiting fewer targets) than those who used an interface that allowed more flexible movement (forward, backward, left, right, and diagonal). Also, participants using a high fidelity visual scene performed the task significantly quicker and more efficiently than those who used a low fidelity scene. The performance differences between all the conditions decreased with practice, but the performance of the best VE group approached that of the real-world participants. These results indicate the importance of using high fidelity scenes in VEs, and suggest that the use of a simple control system is sufficient for maintaining one's spatial orientation during searching.
@article{wrro4960,
pages = {580--596},
journal = {Presence: Teleoperators and Virtual Environments},
month = {October},
volume = {14},
title = {Movement around real and virtual cluttered environments},
publisher = {MIT Press},
note = {{\copyright} 2005 MIT Press. This is an author produced version of a paper published in Presence. Uploaded in accordance with the publisher's self archiving policy.},
number = {5},
year = {2005},
doi = {10.1162/105474605774918778},
issn = {1531-3263},
abstract = {Two experiments investigated participants' ability to search for targets in a cluttered small-scale space. The first experiment was conducted in the real world with two field of view conditions (full vs. restricted), and participants found the task trivial to perform in both. The second experiment used the same search task but was conducted in a desktop virtual environment (VE), and investigated two movement interfaces and two visual scene conditions. Participants restricted to forward only movement performed the search task quicker and more efficiently (visiting fewer targets) than those who used an interface that allowed more flexible movement (forward, backward, left, right, and diagonal). Also, participants using a high fidelity visual scene performed the task significantly quicker and more efficiently than those who used a low fidelity scene. The performance differences between all the conditions decreased with practice, but the performance of the best VE group approached that of the real-world participants. These results indicate the importance of using high fidelity scenes in VEs, and suggest that the use of a simple control system is sufficient for maintaining one's spatial orientation during searching.},
author = {Lessels, S. and Ruddle, R. A.},
url = {http://dx.doi.org/10.1162/105474605774918778}
}
Trails are a little-researched type of aid that offers great potential benefits for navigation, especially in virtual environments (VEs). An experiment was performed in which participants repeatedly searched a virtual building for target objects assisted by: (1) a trail, (2) landmarks, (3) a trail and landmarks, or (4) neither. The trail was displayed as a white line that showed exactly where a participant had previously traveled. The trail halved the distance that participants traveled during first-time searches, indicating the immediate benefit to users if even a crude form of trail were implemented in a variety of VE applications. However, the general clutter or "pollution" produced by trails reduced the benefit during subsequent navigation and, in the later stages of these searches, caused participants to travel more than twice as far as they needed to, often accidentally bypassing targets even when a trail led directly to them. The proposed solution is to use gene alignment techniques to extract a participant's primary trail from the overall, polluted trail, and graphically emphasize the primary trail to aid navigation.
@misc{wrro4961,
year = {2005},
doi = {10.1109/VR.2005.1492761},
booktitle = {IEEE VR, 2005},
editor = {B. Frohlich and S. Julier and H. Takemura},
note = {{\copyright} Copyright 2005 IEEE. Personal use of this material is permitted. However, permission to reprint/republish this material for advertising or promotional purposes or for creating new collective works for resale or redistribution to servers or lists, or to reuse any copyrighted component of this work in other works must be obtained from the IEEE. },
publisher = {IEEE},
title = {The effect of trails on first-time and subsequent navigation in a virtual environment},
journal = {Conference Proceedings. IEEE Virtual Reality 2005},
pages = {115--122},
author = {Ruddle, R. A.},
url = {http://dx.doi.org/10.1109/VR.2005.1492761},
isbn = {0-7803-8929-8},
keywords = {Virtual Environment, Navigation, Navigation Aid, Trail, Landmark},
abstract = {Trails are a little-researched type of aid that offers great potential benefits for navigation, especially in virtual environments (VEs). An experiment was performed in which participants repeatedly searched a virtual building for target objects assisted by: (1) a trail, (2) landmarks, (3) a trail and landmarks, or (4) neither. The trail was displayed as a white line that showed exactly where a participant had previously traveled. The trail halved the distance that participants traveled during first-time searches, indicating the immediate benefit to users if even a crude form of trail were implemented in a variety of VE applications. However, the general clutter or "pollution" produced by trails reduced the benefit during subsequent navigation and, in the later stages of these searches, caused participants to travel more than twice as far as they needed to, often accidentally bypassing targets even when a trail led directly to them. The proposed solution is to use gene alignment techniques to extract a participant's primary trail from the overall, polluted trail, and graphically emphasize the primary trail to aid navigation.}
}
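The "gene alignment" proposal in the entry above amounts to global sequence alignment of trail data. Below is a minimal Needleman-Wunsch sketch in Python that recovers the segments on which two noisy traversals agree; the scoring parameters and the symbolic trail encoding are assumptions made purely for illustration.

def align_trails(a, b, match=1, mismatch=-1, gap=-1):
    """Global (Needleman-Wunsch) alignment of two trail sequences,
    returning the segments on which both traversals agree."""
    n, m = len(a), len(b)
    score = [[0] * (m + 1) for _ in range(n + 1)]
    for i in range(1, n + 1):
        score[i][0] = i * gap
    for j in range(1, m + 1):
        score[0][j] = j * gap
    for i in range(1, n + 1):
        for j in range(1, m + 1):
            diag = score[i - 1][j - 1] + (match if a[i - 1] == b[j - 1] else mismatch)
            score[i][j] = max(diag, score[i - 1][j] + gap, score[i][j - 1] + gap)
    # Trace back along an optimal path, collecting positions where the trails match.
    i, j, common = n, m, []
    while i > 0 and j > 0:
        diag = score[i - 1][j - 1] + (match if a[i - 1] == b[j - 1] else mismatch)
        if score[i][j] == diag:
            if a[i - 1] == b[j - 1]:
                common.append(a[i - 1])
            i, j = i - 1, j - 1
        elif score[i][j] == score[i - 1][j] + gap:
            i -= 1
        else:
            j -= 1
    return common[::-1]

# Two noisy traversals of the same route through rooms A-E (hypothetical).
print(align_trails(list("ABXCDE"), list("ABCDYE")))  # ['A', 'B', 'C', 'D', 'E']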
The difficulties people frequently have navigating in virtual environments (VEs) are well known. Usually these difficulties are quantified in terms of performance (e.g., time taken or number of errors made in following a path), with these data used to compare navigation in VEs to equivalent real-world settings. However, an important cause of any performance differences is changes in people's navigational behaviour. This paper reports a study that investigated the effect of visual scene fidelity and field of view (FOV) on participants' behaviour in a navigational search task, to help identify the thresholds of fidelity that are required for efficient VE navigation. With a wide FOV (144 degrees), participants spent a significantly larger proportion of their time travelling through the VE, whereas participants who used a normal FOV (48 degrees) spent significantly longer standing in one place planning where to travel. Also, participants who used a wide FOV and a high fidelity scene came significantly closer to conducting the search "perfectly" (visiting each place once). In an earlier real-world study, participants completed 93\% of their searches perfectly and planned where to travel while they moved. Thus, navigating a high fidelity VE with a wide FOV increased the similarity between VE and real-world navigational behaviour, which has important implications for both VE design and understanding human navigation. Detailed analysis of the errors that participants made during their non-perfect searches highlighted a dramatic difference between the two FOVs. With a narrow FOV participants often travelled right past a target without it appearing on the display, whereas with the wide FOV targets that were displayed towards the sides of participants' overall FOV were often not searched, indicating a problem with the demands made by such a wide FOV display on human visual attention.
@misc{wrro4962,
booktitle = {EGVE'04},
editor = {S. Coquillart and M. G{\"o}bel},
year = {2004},
note = {Copyright {\copyright} 2004 by the Eurographics Association. This is an author produced version of the paper. The definitive version is available at diglib.eg.org. Uploaded in accordance with the publisher's self-archiving policy.},
title = {Changes in navigational behaviour produced by a wide field of view and a high fidelity visual scene},
publisher = {Eurographics},
pages = {71--78},
journal = {Proceedings of the 10th Eurographics Symposium on Virtual Environments},
url = {http://www.eg.org/EG/DL/WS/EGVE/EGVE04/VE04.pdf},
isbn = {3-905673-10-X},
author = {Lessels, S. and Ruddle, R. A.},
abstract = {The difficulties people frequently have navigating in virtual environments (VEs) are well known. Usually these difficulties are quantified in terms of performance (e.g., time taken or number of errors made in following a path), with these data used to compare navigation in VEs to equivalent real-world settings. However, an important cause of any performance differences is changes in people's navigational behaviour. This paper reports a study that investigated the effect of visual scene fidelity and field of view (FOV) on participants' behaviour in a navigational search task, to help identify the thresholds of fidelity that are required for efficient VE navigation. With a wide FOV (144 degrees), participants spent a significantly larger proportion of their time travelling through the VE, whereas participants who used a normal FOV (48 degrees) spent significantly longer standing in one place planning where to travel. Also, participants who used a wide FOV and a high fidelity scene came significantly closer to conducting the search "perfectly" (visiting each place once). In an earlier real-world study, participants completed 93\% of their searches perfectly and planned where to travel while they moved. Thus, navigating a high fidelity VE with a wide FOV increased the similarity between VE and real-world navigational behaviour, which has important implications for both VE design and understanding human navigation. Detailed analysis of the errors that participants made during their non-perfect searches highlighted a dramatic difference between the two FOVs. With a narrow FOV participants often travelled right past a target without it appearing on the display, whereas with the wide FOV targets that were displayed towards the sides of participants' overall FOV were often not searched, indicating a problem with the demands made by such a wide FOV display on human visual attention.}
}
Three experiments investigated the effect of implementing low-level aspects of motor control for a collaborative carrying task within a VE interface, leaving participants free to devote their cognitive resources to the higher-level components of the task. In the task, participants collaborated with an autonomous virtual human in an immersive virtual environment (VE) to carry an object along a predefined path. In experiment 1, participants took up to three times longer to perform the task with a conventional VE interface, in which they had to explicitly coordinate their hand and body movements, than with an interface that controlled the low-level tasks of grasping and holding onto the virtual object. Experiments 2 and 3 extended the study to include the task of carrying an object along a path that contained obstacles to movement. By allowing participants' virtual arms to stretch slightly, the interface software was able to take over some aspects of obstacle avoidance (another low-level task), and this led to further significant reductions in the time that participants took to perform the carrying task. Improvements in performance also occurred when participants used a tethered viewpoint to control their movements because they could see their immediate surroundings in the VEs. This latter finding demonstrates the superiority of a tethered view perspective to a conventional, human's-eye perspective for this type of task.
@article{wrro1422,
number = {2},
note = {{\copyright} 2003 The Massachusetts Institute of Technology. Reproduced in accordance with the publisher's self-archiving policy.},
year = {2003},
doi = {10.1162/105474603321640914},
month = {April},
journal = {Presence: Teleoperators \& Virtual Environments},
volume = {12},
pages = {140--155},
publisher = {MIT Press},
title = {Levels of control during a collaborative carrying task},
url = {http://dx.doi.org/10.1162/105474603321640914},
author = {Ruddle, R. A. and Savage, J. C. D. and Jones, D. M.},
abstract = {Three experiments investigated the effect of implementing low-level aspects of motor control for a collaborative carrying task within a VE interface, leaving participants free to devote their cognitive resources to the higher-level components of the task. In the task, participants collaborated with an autonomous virtual human in an immersive virtual environment (VE) to carry an object along a predefined path. In experiment 1, participants took up to three times longer to perform the task with a conventional VE interface, in which they had to explicitly coordinate their hand and body movements, than with an interface that controlled the low-level tasks of grasping and holding onto the virtual object. Experiments 2 and 3 extended the study to include the task of carrying an object along a path that contained obstacles to movement. By allowing participants' virtual arms to stretch slightly, the interface software was able to take over some aspects of obstacle avoidance (another low-level task), and this led to further significant reductions in the time that participants took to perform the carrying task. Improvements in performance also occurred when participants used a tethered viewpoint to control their movements because they could see their immediate surroundings in the VEs. This latter finding demonstrates the superiority of a tethered view perspective to a conventional, human's-eye perspective for this type of task.},
issn = {1054-7460}
}
Cooperation between multiple users in a virtual environment (VE) can take place at one of three levels. These are defined as where users can perceive each other (Level 1), individually change the scene (Level 2), or simultaneously act on and manipulate the same object (Level 3). Despite representing the highest level of cooperation, multi-user object manipulation has rarely been studied. This paper describes a behavioral experiment in which the piano movers' problem (maneuvering a large object through a restricted space) was used to investigate object manipulation by pairs of participants in a VE. Participants' interactions with the object were integrated together either symmetrically or asymmetrically. The former only allowed the common component of participants' actions to take place, but the latter used the mean. Symmetric action integration was superior for sections of the task when both participants had to perform similar actions, but if participants had to move in different ways (e.g., one maneuvering themselves through a narrow opening while the other traveled down a wide corridor) then asymmetric integration was superior. With both forms of integration, the extent to which participants coordinated their actions was poor and this led to a substantial cooperation overhead (the reduction in performance caused by having to cooperate with another person).
@article{wrro4965,
number = {4},
note = {{\copyright} 2002 ACM. This is an author produced version of a paper published in ACM Transactions on Computer-Human Interaction. Uploaded in accordance with the publisher's self-archiving policy.},
year = {2002},
doi = {10.1145/586081.586084},
pages = {285--308},
journal = {ACM Transactions on Computer-Human Interaction (TOCHI)},
month = {December},
volume = {9},
title = {Symmetric and asymmetric action integration during cooperative object manipulation in virtual environments},
publisher = {ACM},
abstract = {Cooperation between multiple users in a virtual environment (VE) can take place at one of three levels. These are defined as where users can perceive each other (Level 1), individually change the scene (Level 2), or simultaneously act on and manipulate the same object (Level 3). Despite representing the highest level of cooperation, multi-user object manipulation has rarely been studied. This paper describes a behavioral experiment in which the piano movers' problem (maneuvering a large object through a restricted space) was used to investigate object manipulation by pairs of participants in a VE. Participants' interactions with the object were integrated together either symmetrically or asymmetrically. The former only allowed the common component of participants' actions to take place, but the latter used the mean. Symmetric action integration was superior for sections of the task when both participants had to perform similar actions, but if participants had to move in different ways (e.g., one maneuvering themselves through a narrow opening while the other traveled down a wide corridor) then asymmetric integration was superior. With both forms of integration, the extent to which participants coordinated their actions was poor and this led to a substantial cooperation overhead (the reduction in performance caused by having to cooperate with another person).},
issn = {1073-0516},
keywords = {Virtual environments, object manipulation, piano movers' problem, rules of interaction.},
url = {http://dx.doi.org/10.1145/586081.586084},
author = {Jones, D. M. and Ruddle, R. A. and Savage, J. C.}
}
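The two integration schemes in the entry above reduce to a few lines of arithmetic. In the Python sketch below, "common component" is given one plausible per-axis reading (zero unless both users push the same way, and then the smaller magnitude); that reading, and the use of simple translation vectors, are assumptions for illustration, while the mean case follows the abstract directly.

def integrate_symmetric(u, v):
    """Symmetric integration: keep only the common component of the two
    users' actions (here: per axis, the smaller magnitude when the signs
    agree, otherwise no movement)."""
    return [min(a, b, key=abs) if a * b > 0 else 0.0 for a, b in zip(u, v)]

def integrate_asymmetric(u, v):
    """Asymmetric integration: apply the mean of the two users' actions."""
    return [(a + b) / 2.0 for a, b in zip(u, v)]

# Hypothetical per-frame translation inputs (x, y, z) from the two users.
u, v = (0.4, 0.0, -0.2), (0.6, -0.1, 0.1)
print(integrate_symmetric(u, v))   # [0.4, 0.0, 0.0]
print(integrate_asymmetric(u, v))  # [0.5, -0.05, -0.05]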
A set of rules is presented for the design of interfaces that allow virtual objects to be manipulated in 3D virtual environments (VEs). The rules differ from other interaction techniques because they focus on the problems of manipulating objects in cluttered spaces rather than open spaces. Two experiments are described that were used to evaluate the effect of different interaction rules on participants' performance when they performed a task known as "the piano mover's problem." This task involved participants in moving a virtual human through parts of a virtual building while simultaneously manipulating a large virtual object that was held in the virtual human's hands, resembling the simulation of manual materials handling in a VE for ergonomic design. Throughout, participants viewed the VE on a large monitor, using an "over-the-shoulder" perspective. In the most cluttered VEs, the time that participants took to complete the task varied by up to 76\% with different combinations of rules, thus indicating the need for flexible forms of interaction in such environments.
@article{wrro1423,
pages = {591--609},
volume = {11},
journal = {Presence: Teleoperators \& Virtual Environments},
month = {December},
title = {Evaluating rules of interaction for object manipulation in cluttered virtual environments},
publisher = {MIT Press},
number = {6},
note = {{\copyright} 2002 The Massachusetts Institute of Technology. Reproduced in accordance with the publisher's self-archiving policy.},
doi = {10.1162/105474602321050721},
year = {2002},
issn = {1054-7460},
abstract = {A set of rules is presented for the design of interfaces that allow virtual objects to be manipulated in 3D virtual environments (VEs). The rules differ from other interaction techniques because they focus on the problems of manipulating objects in cluttered spaces rather than open spaces. Two experiments are described that were used to evaluate the effect of different interaction rules on participants' performance when they performed a task known as "the piano mover's problem." This task involved participants in moving a virtual human through parts of a virtual building while simultaneously manipulating a large virtual object that was held in the virtual human's hands, resembling the simulation of manual materials handling in a VE for ergonomic design. Throughout, participants viewed the VE on a large monitor, using an "over-the-shoulder" perspective. In the most cluttered VEs, the time that participants took to complete the task varied by up to 76\% with different combinations of rules, thus indicating the need for flexible forms of interaction in such environments.},
author = {Ruddle, R. A. and Savage, J. C. D. and Jones, D. M.},
url = {http://dx.doi.org/10.1162/105474602321050721}
}
Object manipulation in cluttered virtual environments (VEs) brings additional challenges to the design of interaction algorithms, when compared with open virtual spaces. As the complexity of the algorithms increases so does the flexibility with which users can interact, but this is at the expense of much greater difficulties in implementation for developers. Three rules that increase the realism and flexibility of interaction are outlined: collision response, order of control, and physical compatibility. The implementation of each is described, highlighting the substantial increase in algorithm complexity that arises. Data are reported from an experiment in which participants manipulated a bulky virtual object through parts of a virtual building (the piano movers' problem). These data illustrate the benefits to users that accrue from implementing flexible rules of interaction.
@misc{wrro4964,
year = {2002},
doi = {10.1145/585740.585756},
booktitle = {VRST'02},
note = {Copyright 2002 ACM. This is an author produced version of a paper published in Proceedings of the ACM Symposium on Virtual Reality Software and Technology. Uploaded in accordance with the publisher's self-archiving policy.},
publisher = {ACM},
title = {Implementing flexible rules of interaction for object manipulation in cluttered virtual environments},
journal = {Proceedings of the ACM Symposium on Virtual Reality Software and Technology},
pages = {89--96},
isbn = {1-58113-530-0},
url = {http://dx.doi.org/10.1145/585740.585756},
author = {Jones, D. M. and Ruddle, R. A. and Savage, J. C.},
abstract = {Object manipulation in cluttered virtual environments (VEs) brings additional challenges to the design of interaction algorithms, when compared with open virtual spaces. As the complexity of the algorithms increases so does the flexibility with which users can interact, but this is at the expense of much greater difficulties in implementation for developers. Three rules that increase the realism and flexibility of interaction are outlined: collision response, order of control, and physical compatibility. The implementation of each is described, highlighting the substantial increase in algorithm complexity that arises. Data are reported from an experiment in which participants manipulated a bulky virtual object through parts of a virtual building (the piano movers' problem). These data illustrate the benefits to users that accrue from implementing flexible rules of interaction.},
keywords = {Virtual Environments, Object Manipulation, Rules of Interaction.}
}
Cooperation between multiple users in a virtual environment (VE) can take place at one of three levels, but it is only at the highest level that users can simultaneously interact with the same object. This paper describes a study in which a straightforward real-world task (maneuvering a large object through a restricted space) was used to investigate object manipulation by pairs of participants in a VE, and focuses on the verbal communication that took place. This communication was analyzed using both categorizing and conversation analysis techniques. Of particular note was the sheer volume of communication that took place. One third of this was instructions from one participant to another of the locomotion and manipulation movements that they should make. Another quarter was general communication that was not directly related to performance of the experimental task, and often involved explicit statements of participants' actions or requests for clarification about what was happening. Further research is required to determine the extent to which haptic and auditory feedback reduce the need for inter-participant communication in collaborative tasks.
@misc{wrro5420,
year = {2002},
doi = {10.1145/571878.571897},
booktitle = {CVE'02},
journal = {Collaborative Virtual Environments. Proceedings of the 4th International Conference on Collaborative Virtual Environments},
pages = {120--127},
publisher = {ACM},
title = {Verbal communication during cooperative object manipulation},
address = {New York},
abstract = {Cooperation between multiple users in a virtual environment (VE) can take place at one of three levels, but it is only at the highest level that users can simultaneously interact with the same object. This paper describes a study in which a straightforward real-world task (maneuvering a large object through a restricted space) was used to investigate object manipulation by pairs of participants in a VE, and focuses on the verbal communication that took place. This communication was analyzed using both categorizing and conversation analysis techniques. Of particular note was the sheer volume of communication that took place. One third of this was instructions from one participant to another of the locomotion and manipulation movements that they should make. Another quarter was general communication that was not directly related to performance of the experimental task, and often involved explicit statements of participants' actions or requests for clarification about what was happening. Further research is required to determine the extent to which haptic and auditory feedback reduce the need for inter-participant communication in collaborative tasks.},
keywords = {Virtual Environments, Object Manipulation, Verbal Communication, Piano Movers' Problem, Rules of Interaction.},
isbn = {1-58113-489-4},
url = {http://dx.doi.org/10.1145/571878.571897},
author = {Ruddle, R. A. and Savage, J. C. D. and Jones, D. M.}
}
Data is presented from virtual environment (VE) navigation studies that used building- and chessboard-type layouts. Participants learned by repeated navigation, spending several hours in each environment. While some participants quickly learned to navigate efficiently, others remained almost totally disoriented. In the virtual buildings this disorientation was illustrated by mean direction estimate errors of approximately 90 degrees, and in the chessboard VEs disorientation was highlighted by the large number of rooms that some participants visited. Part of the cause of disorientation, and generally slow spatial learning, lies in the difficulty participants had learning the paths they had followed through the VEs.
@incollection{wrro5422,
note = {Uploaded in accordance with the publisher's self-archiving policy.},
booktitle = {Engineering Psychology and Cognitive Ergonomics - Volume Six: Industrial Ergonomics, HCI, and Applied Cognitive Psychology},
editor = {D. Harris},
year = {2001},
pages = {135--142},
volume = {6},
month = {October},
journal = {Engineering psychology and cognitive ergonomics},
title = {Navigation: am I really lost or virtually there?},
publisher = {Ashgate},
isbn = {978-0-7546-1338-1},
url = {https://eprints.whiterose.ac.uk/id/eprint/5422/},
author = {Ruddle, R. A.},
abstract = {Data is presented from virtual environment (VE) navigation studies that used building- and chessboard-type layouts. Participants learned by repeated navigation, spending several hours in each environment. While some participants quickly learned to navigate efficiently, others remained almost totally disoriented. In the virtual buildings this disorientation was illustrated by mean direction estimate errors of approximately 90 degrees, and in the chessboard VEs disorientation was highlighted by the large number of rooms that some participants visited. Part of the cause of disorientation, and generally slow spatial learning, lies in the difficulty participants had learning the paths they had followed through the VEs.}
}
Imagine walking around a cluttered room but then having little idea of where you have traveled. This frequently happens when people move around small virtual environments (VEs), searching for targets. In three experiments, participants searched small-scale VEs using different movement interfaces, collision response algorithms, and fields of view. Participants' searches were most efficient in terms of distance traveled, time taken, and path followed when the simplest form of movement (view direction) was used in conjunction with a response algorithm that guided ("slipped") them around obstacles when collisions occurred. Unexpectedly, and in both immersive and desktop VEs, participants often had great difficulty finding the targets, despite the fact that participants could see the whole VE if they stood in one place and turned around. Thus, the trivial real-world task used in the present study highlights a basic problem with current VE systems.
@article{wrro1425,
doi = {10.1162/105474601753132687},
year = {2001},
number = {5},
note = {{\copyright} 2001 The Massachusetts Institute of Technology. Reproduced in accordance with the publisher's self-archiving policy.},
publisher = {MIT Press},
title = {Movement in cluttered virtual environments},
volume = {10},
journal = {Presence: Teleoperators \& Virtual Environments},
month = {October},
pages = {511--524},
issn = {1054-7460},
abstract = {Imagine walking around a cluttered room but then having little idea of where you have traveled. This frequently happens when people move around small virtual environments (VEs), searching for targets. In three experiments, participants searched small-scale VEs using different movement interfaces, collision response algorithms, and fields of view. Participants' searches were most efficient in terms of distance traveled, time taken, and path followed when the simplest form of movement (view direction) was used in conjunction with a response algorithm that guided ("slipped") them around obstacles when collisions occurred. Unexpectedly, and in both immersive and desktop VEs, participants often had great difficulty finding the targets, despite the fact that participants could see the whole VE if they stood in one place and turned around. Thus, the trivial real-world task used in the present study highlights a basic problem with current VE systems.},
author = {Ruddle, R. A. and Jones, D. M.},
url = {http://dx.doi.org/10.1162/105474601753132687}
}
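The "slip" collision response described in the entry above is typically implemented by cancelling the component of the desired motion that points into the obstacle and keeping the tangential remainder. A minimal Python sketch follows, assuming the collision test supplies a unit surface normal at the contact point; the vectors and the example wall are hypothetical.

def slip(motion, normal):
    """Slide the viewpoint along an obstacle: remove the component of the
    desired motion directed into the surface (given by its unit normal),
    keeping the tangential part."""
    into = sum(m * n for m, n in zip(motion, normal))
    if into >= 0:  # not moving into the surface, so no correction needed
        return list(motion)
    return [m - into * n for m, n in zip(motion, normal)]

# Walking diagonally into a wall whose unit normal points back at the viewer:
# the forward component is cancelled and the sideways component remains.
print(slip((-1.0, 0.5, 0.0), (1.0, 0.0, 0.0)))  # [0.0, 0.5, 0.0]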
Hyperlinks introduce discontinuities of movement to 3-D virtual environments (VEs). Nine independent attributes of hyperlinks are defined and their likely effects on navigation in VEs are discussed. Four experiments are described in which participants repeatedly navigated VEs that were either conventional (i.e. obeyed the laws of Euclidean space), or contained hyperlinks. Participants learned spatial knowledge slowly in both types of environment, echoing the findings of previous studies that used conventional VEs. The detrimental effects on participants' spatial knowledge of using hyperlinks for movement were reduced when a time-delay was introduced, but participants still developed less accurate knowledge than they did in the conventional VEs. Visual continuity had a greater influence on participants' rate of learning than continuity of movement, and participants were able to exploit hyperlinks that connected together disparate regions of a VE to reduce travel time.
@article{wrro76425,
pages = {551--581},
volume = {53},
month = {October},
journal = {International Journal of Human Computer Studies},
title = {Effects of hyperlinks on navigation in virtual environments},
publisher = {Elsevier},
note = {{\copyright} 2000, Elsevier. This is an author produced version of a paper published in International Journal of Human Computer Studies. Uploaded in accordance with the publisher's self-archiving policy.},
number = {4},
doi = {10.1006/ijhc.2000.0402},
year = {2000},
abstract = {Hyperlinks introduce discontinuities of movement to 3-D virtual environments (VEs). Nine independent attributes of hyperlinks are defined and their likely effects on navigation in VEs are discussed. Four experiments are described in which participants repeatedly navigated VEs that were either conventional (i.e. obeyed the laws of Euclidean space), or contained hyperlinks. Participants learned spatial knowledge slowly in both types of environment, echoing the findings of previous studies that used conventional VEs. The detrimental effects on participants' spatial knowledge of using hyperlinks for movement were reduced when a time-delay was introduced, but participants still developed less accurate knowledge than they did in the conventional VEs. Visual continuity had a greater influence on participants' rate of learning than continuity of movement, and participants were able to exploit hyperlinks that connected together disparate regions of a VE to reduce travel time.},
issn = {1071-5819},
url = {http://dx.doi.org/10.1006/ijhc.2000.0402},
author = {Ruddle, RA and Howes, A and Payne, SJ and Jones, DM}
}
Participants used a helmet-mounted display (HMD) and a desk-top (monitor) display to learn the layouts of two large-scale virtual environments (VEs) through repeated, direct navigational experience. Both VEs were "virtual buildings" containing more than seventy rooms. Participants using the HMD navigated the buildings significantly more quickly and developed a significantly more accurate sense of relative straight-line distance. There was no significant difference between the two types of display in terms of the distance that participants traveled or the mean accuracy of their direction estimates. Behavioral analyses showed that participants took advantage of the natural, head-tracked interface provided by the HMD in ways that included "looking around" more often while traveling through the VEs, and spending less time stationary in the VEs while choosing a direction in which to travel.
@article{wrro76426,
doi = {10.1162/105474699566143},
year = {1999},
number = {2},
note = {{\copyright} 1999, Massachusetts Institute of Technology Press. Reproduced in accordance with the publisher's self-archiving policy.},
title = {Navigating large-scale virtual environments: What differences occur between helmet-mounted and desk-top displays?},
publisher = {Massachusetts Institute of Technology Press},
pages = {157--168},
volume = {8},
month = {April},
journal = {Presence: Teleoperators and Virtual Environments},
abstract = {Participants used a helmet-mounted display (HMD) and a desk-top (monitor) display to learn the layouts of two large-scale virtual environments (VEs) through repeated, direct navigational experience. Both VEs were "virtual buildings" containing more than seventy rooms. Participants using the HMD navigated the buildings significantly more quickly and developed a significantly more accurate sense of relative straight-line distance. There was no significant difference between the two types of display in terms of the distance that participants traveled or the mean accuracy of their direction estimates. Behavioral analyses showed that participants took advantage of the natural, head-tracked interface provided by the HMD in ways that included "looking around" more often while traveling through the VEs, and spending less time stationary in the VEs while choosing a direction in which to travel.},
issn = {1054-7460},
url = {http://dx.doi.org/10.1162/105474699566143},
author = {Ruddle, RA and Payne, SJ and Jones, DM}
}
Participants used maps and other navigational aids to search desktop (nonimmersive) virtual environments (VEs) for objects that were small and not visible on a global map that showed the whole of a VE and its major topological features. Overall, participants searched most efficiently when they simultaneously used both the global map and a local map that showed their immediate surroundings and the objects' positions. However, after repeated searching, the global map on its own became equally effective. When participants used the local map on its own, their spatial knowledge developed in a manner that was previously associated with learning from a within-environment perspective rather than a survey perspective. Implications for the use of maps as aids for VE navigation are discussed.
@article{wrro76427,
pages = {54--75},
volume = {5},
month = {March},
journal = {Journal of Experimental Psychology: Applied},
title = {The effects of maps on navigation and search strategies in very-large-scale virtual environments},
publisher = {American Psychological Association},
note = {{\copyright} 1999, American Psychological Association. This is an author produced version of a paper published in Journal of Experimental Psychology: Applied. Uploaded in accordance with the publisher's self-archiving policy. This article may not exactly replicate the final version published in the APA journal. It is not the copy of record.},
number = {1},
doi = {10.1037/1076-898X.5.1.54},
year = {1999},
author = {Ruddle, RA and Payne, SJ and Jones, DM},
url = {http://dx.doi.org/10.1037/1076-898X.5.1.54},
issn = {1076-898X},
abstract = {Participants used maps and other navigational aids to search desktop (nonimmersive) virtual environments (VEs) for objects that were small and not visible on a global map that showed the whole of a VE and its major topological features. Overall, participants searched most efficiently when they simultaneously used both the global map and a local map that showed their immediate surroundings and the objects' positions. However, after repeated searching, the global map on its own became equally effective. When participants used the local map on its own, their spatial knowledge developed in a manner that was previously associated with learning from a within-environment perspective rather than a survey perspective. Implications for the use of maps as aids for VE navigation are discussed.}
}
Two experiments investigated components of participants' spatial knowledge when they navigated large-scale "virtual buildings" using "desk-top" (i.e., nonimmersive) virtual environments (VEs). Experiment 1 showed that participants could estimate directions with reasonable accuracy when they traveled along paths that contained one or two turns (changes of direction), but participants' estimates were significantly less accurate when the paths contained three turns. In Experiment 2 participants repeatedly navigated two more complex virtual buildings, one with and the other without a compass. The accuracy of participants' route-finding and their direction and relative straight-line distance estimates improved with experience, but there were no significant differences between the two compass conditions. However, participants did develop significantly more accurate spatial knowledge as they became more familiar with navigating VEs in general.
@article{wrro76428,
volume = {7},
month = {April},
journal = {Presence: Teleoperators and Virtual Environments},
pages = {179--192},
publisher = {Massachusetts Institute of Technology Press},
title = {Navigating large-scale "desk-top" virtual buildings: Effects of orientation aids and familiarity},
note = {{\copyright} 1998, Massachusetts Institute of Technology Press. Reproduced in accordance with the publisher's self-archiving policy. },
number = {2},
doi = {10.1162/105474698565668},
year = {1998},
issn = {1054-7460},
abstract = {Two experiments investigated components of participants' spatial knowledge when they navigated large-scale "virtual buildings" using "desk-top" (i.e., nonimmersive) virtual environments (VEs). Experiment 1 showed that participants could estimate directions with reasonable accuracy when they traveled along paths that contained one or two turns (changes of direction), but participants' estimates were significantly less accurate when the paths contained three turns. In Experiment 2 participants repeatedly navigated two more complex virtual buildings, one with and the other without a compass. The accuracy of participants' route-finding and their direction and relative straight-line distance estimates improved with experience, but there were no significant differences between the two compass conditions. However, participants did develop significantly more accurate spatial knowledge as they became more familiar with navigating VEs in general.},
author = {Ruddle, RA and Payne, SJ and Jones, DM},
url = {http://dx.doi.org/10.1162/105474698565668}
}
