Publication List (Carr)
@article{wrro216133,
month = {July},
title = {Distributed Augmentation, Hypersweeps, and Branch Decomposition of Contour Trees for Scientific Exploration},
author = {M Li and H Carr and O R{\"u}bel and B Wang and GH Weber},
publisher = {IEEE},
year = {2024},
note = {{\copyright} 2024 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
journal = {IEEE Transactions on Visualization and Computer Graphics},
url = {https://eprints.whiterose.ac.uk/216133/}
}
@misc{wrro190963,
booktitle = {2022 IEEE 12th Symposium on Large Data Analysis and Visualization (LDAV)},
month = {December},
title = {Distributed Hierarchical Contour Trees},
author = {HA Carr and O R{\"u}bel and GH Weber},
publisher = {IEEE},
year = {2022},
journal = {2022 IEEE 12th Symposium on Large Data Analysis and Visualization (LDAV)},
url = {https://eprints.whiterose.ac.uk/190963/},
abstract = {Contour trees are a significant tool for data analysis as they capture both local and global variation. However, their utility has been limited by scalability, in particular for distributed computation and storage. We report a distributed data structure for storing the contour tree of a data set distributed on a cluster, based on a fan-in hierarchy, and an algorithm for computing it based on the boundary tree that represents only the superarcs of a contour tree that involve contours that cross boundaries between blocks. This allows us to limit the communication cost for contour tree computation to the complexity of the block boundaries rather than of the entire data set.}
}
@misc{wrro167116,
month = {September},
author = {P Hristov and HA Carr},
series = {Mathematics and Visualization (MATHVISUAL)},
note = {{\copyright} 2021 The Author(s), under exclusive license to Springer Nature Switzerland AG. This version of the article has been accepted for publication, after peer review (when applicable) and is subject to Springer Nature's AM terms of use (https://www.springernature.com/gp/open-research/policies/accepted-manuscript-terms), but is not the Version of Record and does not reflect post-acceptance improvements, or any corrections. The Version of Record is available online at https://doi.org/10.1007/978-3-030-83500-2\_1.},
booktitle = {Topological Methods in Data Analysis and Visualization VI},
title = {W-Structures in Contour Trees},
publisher = {Springer},
year = {2021},
journal = {Topological Methods in Data Analysis and Visualization VI},
pages = {3--18},
url = {https://eprints.whiterose.ac.uk/167116/},
abstract = {The contour tree is one of the principal tools in scientific visualisation. It captures the connectivity of level sets in scalar fields. In order to apply the contour tree to exascale data we need efficient shared memory and distributed algorithms. Recent work has revealed a parallel performance bottleneck caused by substructures of contour trees called W-structures. We report two novel algorithms that detect and extract the W-structures. We also use the W-structures to show that extended persistence is not equivalent to branch decomposition and leaf-pruning.}
}
@article{wrro151668,
volume = {27},
number = {4},
month = {April},
author = {HA Carr and GH Weber and CM Sewell and O R{\"u}bel and P Fasel and JP Ahrens},
note = {Protected by copyright. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
title = {Scalable Contour Tree Computation by Data Parallel Peak Pruning},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
year = {2021},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {2437--2454},
url = {https://eprints.whiterose.ac.uk/151668/},
abstract = {As data sets grow to exascale, automated data analysis and visualisation are increasingly important, to intermediate human understanding and to reduce demands on disk storage via in situ analysis. Trends in architecture of high performance computing systems necessitate analysis algorithms to make effective use of combinations of massively multicore and distributed systems. One of the principal analytic tools is the contour tree, which analyses relationships between contours to identify features of more than local importance. Unfortunately, the predominant algorithms for computing the contour tree are explicitly serial, and founded on serial metaphors, which has limited the scalability of this form of analysis. While there is some work on distributed contour tree computation, and separately on hybrid GPU-CPU computation, there is no efficient algorithm with strong formal guarantees on performance allied with fast practical performance. We report the first shared SMP algorithm for fully parallel contour tree computation, with formal guarantees of O(lg n lg t) parallel steps and O(n lg n) work, and implementations with more than 30{$\times$} parallel speed up on both CPU using TBB and GPU using Thrust and up to 70{$\times$} speed up compared to the serial sweep and merge algorithm.}
}
@article{wrro171318,
month = {March},
title = {Optimization and Augmentation for Data Parallel Contour Trees},
author = {HA Carr and O R{\"u}bel and GH Weber and JP Ahrens},
publisher = {Institute of Electrical and Electronics Engineers},
year = {2021},
note = {{\copyright} 2021 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
journal = {IEEE Transactions on Visualization and Computer Graphics},
keywords = {Computational Topology, Contour Tree, Parallel Algorithm},
url = {https://eprints.whiterose.ac.uk/171318/},
abstract = {Contour trees are used for topological data analysis in scientific visualization. While originally computed with serial algorithms, recent work has introduced a vector-parallel algorithm. However, this algorithm is relatively slow for fully augmented contour trees which are needed for many practical data analysis tasks. We therefore introduce a representation called the hyperstructure that enables efficient searches through the contour tree and use it to construct a fully augmented contour tree in data parallel, with performance on average 6 times faster than the state-of-the-art parallel algorithm in the TTK topological toolkit.}
}
@misc{wrro167115,
month = {December},
author = {P Hristov and G Weber and H Carr and O R{\"u}bel and JP Ahrens},
note = {{\copyright} 2020, IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
booktitle = {2020 IEEE 10th Symposium on Large Data Analysis and Visualization (LDAV)},
title = {Data Parallel Hypersweeps for in Situ Topological Analysis},
publisher = {IEEE},
year = {2020},
journal = {Proceedings of 2020 IEEE 10th Symposium on Large Data Analysis and Visualization (LDAV)},
pages = {12--21},
keywords = {contour tree, in situ, scalar field, geometric measures, branch decomposition},
url = {https://eprints.whiterose.ac.uk/167115/},
abstract = {The contour tree is a tool for understanding the topological structure of a scalar field. Recent work has built efficient contour tree algorithms for shared memory parallel computation, driven by the need to analyze large data sets in situ while the simulation is running. Unfortunately, methods for using the contour tree for practical data analysis are still primarily serial, including single isocontour extraction, branch decomposition and simplification. We report data parallel methods for these tasks using a data structure called the hyperstructure and a general purpose approach called a hypersweep. We implement and integrate these methods with a Cinema database that stores features as depth images and with a web server that reconstructs the features for direct visualization.}
}
@incollection{wrro144396,
month = {December},
author = {H Carr and J Tierny and GH Weber},
series = {Mathematics and Visualization},
note = {{\copyright} Springer Nature Switzerland AG 2020. This is an author accepted version of a chapter published in Carr H., Fujishiro I., Sadlo F., Takahashi S. (eds) Topological Methods in Data Analysis and Visualization V. TopoInVis 2017. Mathematics and Visualization. Springer, Cham. Uploaded in accordance with the publisher's self-archiving policy.},
booktitle = {Topological Methods in Data Analysis and Visualization V},
title = {Pathological and Test Cases For Reeb Analysis},
publisher = {Springer},
year = {2020},
pages = {103--120},
keywords = {Computational Topology, Reeb Space, Reeb Graph, Contour Tree, Reeb Analysis},
url = {https://eprints.whiterose.ac.uk/144396/},
abstract = {After two decades in computational topology, it is clearly a computationally challenging area. Not only do we have the usual algorithmic and programming difficulties with establishing correctness, we also have a class of problems that are mathematically complex and notationally fragile. Effective development and deployment therefore requires an additional step - construction or selection of suitable test cases. Since we cannot test all possible inputs, our selection of test cases expresses our understanding of the task and of the problems involved. Moreover, the scale of the data sets we work with is such that, no matter how unlikely the behaviour mathematically, it is nearly guaranteed to occur at scale in every run. The test cases we choose are therefore tightly coupled with mathematically pathological cases, and need to be developed using the skills expressed most obviously in the construction of mathematical counterexamples. This paper is therefore a first attempt at reporting, classifying and analysing test cases previously used in computational topology, and the expression of a philosophy of how to test topological code.}
}
@incollection{wrro144583,
month = {December},
author = {D Sakurai and K Ono and H Carr and J Nonaka and T Kawanabe},
series = {Mathematics and Visualization},
note = {{\copyright} Springer Nature Switzerland AG 2020. This is an author accepted version of a paper published in Sakurai D., Ono K., Carr H., Nonaka J., Kawanabe T. (2020) Flexible Fiber Surfaces: A Reeb-Free Approach. In: Carr H., Fujishiro I., Sadlo F., Takahashi S. (eds) Topological Methods in Data Analysis and Visualization V. TopoInVis 2017. Mathematics and Visualization. Springer, Cham. Uploaded in accordance with the publisher's self-archiving policy.},
booktitle = {Topological Methods in Data Analysis and Visualization V},
title = {Flexible Fiber Surfaces: A Reeb-Free Approach},
publisher = {Springer International Publishing},
year = {2020},
url = {https://eprints.whiterose.ac.uk/144583/},
abstract = {The fiber surface generalizes the popular isosurface to multi-fields, so that pre-images can be visualized as surfaces. As with the isosurface, however, the fiber surface suffers from visual occlusion. We propose to avoid such occlusion by restricting the components to only the relevant ones with a new component-wise flexing algorithm. The approach, flexible fiber surface, generalizes the manipulation idea found in the flexible isosurface for the fiber surface. The flexible isosurface in the original form, however, relies on the contour tree. For the fiber surface, this corresponds to the Reeb space, which is challenging for both the computation and user interaction. We thus take a Reeb-free approach, in which one does not compute the Reeb space. Under this constraint, we generalize a few selected interactions in the flexible isosurface and discuss the implication of the restriction.}
}
@misc{wrro164225,
volume = {12441},
month = {December},
author = {K Djemame and H Carr},
note = {{\copyright} Springer Nature Switzerland AG 2020. This is an author produced version of a conference paper published in Lecture Notes in Computer Science. Uploaded in accordance with the publisher's self-archiving policy.
This version of the article has been accepted for publication, after peer review (when applicable) and is subject to Springer Nature's AM terms of use (https://www.springernature.com/gp/open-research/policies/accepted-manuscript-terms), but is not the Version of Record and does not reflect post-acceptance improvements, or any corrections. The Version of Record is available online at https://doi.org/10.1007/978-3-030-63058-4\_19.},
booktitle = {GECON2020: 17th International Conference on the Economics of Grids, Clouds, Systems, and Services},
editor = {K Djemame and J Altmann and J{\'A} Ba{\~n}ares and O Agmon Ben-Yehuda and V Stankovski and B Tuffin},
title = {Exascale Computing Deployment Challenges},
address = {Cham, Switzerland},
publisher = {Springer},
year = {2020},
journal = {Lecture Notes in Computer Science},
pages = {211--216},
keywords = {Exascale computing; High performance computing; Holistic approach; Economics},
url = {https://eprints.whiterose.ac.uk/164225/},
abstract = {As Exascale computing proliferates, we see an accelerating shift towards clusters with thousands of nodes and thousands of cores per node, often on the back of commodity graphics processing units. This paper argues that this drives a once in a generation shift of computation, and that fundamentals of computer science therefore need to be re-examined. Exploiting the full power of Exascale computation will require attention to the fundamentals of programme design and specification, programming language design, systems and software engineering, analytic, performance and cost models, fundamental algorithmic design, and to increasing replacement of human bandwidth by computational analysis. As part of this, we will argue that Exascale computing will require a significant degree of co-design and close attention to the economics underlying the challenges ahead.}
}
@article{wrro100067,
volume = {23},
number = {7},
month = {July},
author = {P Klacansky and J Tierny and H Carr and Z Geng},
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
title = {Fast and Exact Fiber Surfaces for Tetrahedral Meshes},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
year = {2017},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {1782--1795},
keywords = {Bivariate Data, Data Segmentation, Data Analysis, Isosurfaces, Continuous Scatterplot},
url = {https://eprints.whiterose.ac.uk/100067/},
abstract = {Isosurfaces are fundamental geometrical objects for the analysis and visualization of volumetric scalar fields. Recent work has generalized them to bivariate volumetric fields with fiber surfaces, the pre-image of polygons in range space. However, the existing algorithm for their computation is approximate, and is limited to closed polygons. Moreover, its runtime performance does not allow instantaneous updates of the fiber surfaces upon user edits of the polygons. Overall, these limitations prevent a reliable and interactive exploration of the space of fiber surfaces. This paper introduces the first algorithm for the exact computation of fiber surfaces in tetrahedral meshes. It assumes no restriction on the topology of the input polygon, handles degenerate cases and better captures sharp features induced by polygon bends. The algorithm also allows visualization of individual fibers on the output surface, better illustrating their relationship with data features in range space. To enable truly interactive exploration sessions, we further improve the runtime performance of this algorithm. In particular, we show that it is trivially parallelizable and that it scales nearly linearly with the number of cores. Further, we study acceleration data-structures both in geometrical domain and range space and we show how to generalize interval trees used in isosurface extraction to fiber surface extraction. Experiments demonstrate the superiority of our algorithm over previous work, both in terms of accuracy and running time, with up to two orders of magnitude speedups. This improvement enables interactive edits of range polygons with instantaneous updates of the fiber surface for exploration purposes. A VTK-based reference implementation is provided as additional material to reproduce our results.}
}
@misc{wrro106038,
month = {March},
author = {HA Carr and GH Weber and CM Sewell and JP Ahrens},
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
booktitle = {LDAV 2016},
title = {Parallel Peak Pruning for Scalable SMP Contour Tree Computation},
publisher = {IEEE},
year = {2017},
journal = {6th IEEE Symposium on Large Data Analysis and Visualization},
pages = {75--84},
keywords = {topological analysis, contour tree, merge tree, data parallel algorithms},
url = {https://eprints.whiterose.ac.uk/106038/},
abstract = {As data sets grow to exascale, automated data analysis and visualisation are increasingly important, to intermediate human understanding and to reduce demands on disk storage via in situ analysis. Trends in architecture of high performance computing systems necessitate analysis algorithms to make effective use of combinations of massively multicore and distributed systems. One of the principal analytic tools is the contour tree, which analyses relationships between contours to identify features of more than local importance. Unfortunately, the predominant algorithms for computing the contour tree are explicitly serial, and founded on serial metaphors, which has limited the scalability of this form of analysis. While there is some work on distributed contour tree computation, and separately on hybrid GPU-CPU computation, there is no efficient algorithm with strong formal guarantees on performance allied with fast practical performance. We report the first shared SMP algorithm for fully parallel contour tree computation, with formal guarantees of O(lg n lg t) parallel steps and O(n lg n) work, and implementations with up to 10{$\times$} parallel speed up in OpenMP and up to 50{$\times$} speed up in NVIDIA Thrust.}
}
@inproceedings{wrro114658,
booktitle = {Topology-based Methods in Visualization 2017 (TopoInVis 2017)},
month = {February},
title = {Joint Contour Net analysis of lattice QCD data},
author = {DP Thomas and R Borgo and HA Carr and S Hands},
year = {2017},
keywords = {Computational Topology; Joint Contour Net; Reeb Space},
url = {https://eprints.whiterose.ac.uk/114658/},
abstract = {Lattice Quantum Chromodynamics (QCD) is an approach used by theoretical physicists to model the strong nuclear force. This works at the sub-nuclear scale to bind quarks together into hadrons including the proton and neutron. One of the long term goals in lattice QCD is to produce a phase diagram of QCD matter as thermodynamic control parameters temperature and baryon chemical potential are varied. The ability to predict critical points in the phase diagram, known as phase transitions, is one of the on-going challenges faced by domain scientists. In this work we consider how multivariate topological visualisation techniques can be applied to simulation data to help domain scientists predict the location of phase transitions. In the process it is intended that applying these techniques to lattice QCD will strengthen the interpretation of output from multivariate topological algorithms, including the joint contour net. Lattice QCD presents an interesting opportunity for using these techniques as it offers a rich array of interacting scalar fields for analysis; however, it also presents unique challenges due to its reliance on quantum mechanics to interpret the data.}
}
@article{wrro103600,
volume = {23},
number = {1},
month = {January},
author = {J Tierny and HA Carr},
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
title = {Jacobi Fiber Surfaces for Bivariate Reeb Space Computation},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
year = {2017},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {960--969},
keywords = {Topological data analysis, multivariate data, data segmentation},
url = {https://eprints.whiterose.ac.uk/103600/},
abstract = {This paper presents an efficient algorithm for the computation of the Reeb space of an input bivariate piecewise linear scalar function f defined on a tetrahedral mesh. By extending and generalizing algorithmic concepts from the univariate case to the bivariate one, we report the first practical, output-sensitive algorithm for the exact computation of such a Reeb space. The algorithm starts by identifying the Jacobi set of f, the bivariate analogs of critical points in the univariate case. Next, the Reeb space is computed by segmenting the input mesh along the new notion of Jacobi Fiber Surfaces, the bivariate analog of critical contours in the univariate case. We additionally present a simplification heuristic that enables the progressive coarsening of the Reeb space. Our algorithm is simple to implement and most of its computations can be trivially parallelized. We report performance numbers demonstrating orders of magnitude speedups over previous approaches, enabling for the first time the tractable computation of bivariate Reeb spaces in practice. Moreover, unlike range-based quantization approaches (such as the Joint Contour Net), our algorithm is parameter-free. We demonstrate the utility of our approach by using the Reeb space as a semi-automatic segmentation tool for bivariate data. In particular, we introduce continuous scatterplot peeling, a technique which enables the reduction of the cluttering in the continuous scatterplot, by interactively selecting the features of the Reeb space to project. We provide a VTK-based C++ implementation of our algorithm that can be used for reproduction purposes or for the development of new Reeb space based visualization techniques.}
}
@article{wrro103601,
volume = {23},
number = {1},
month = {January},
author = {K Wu and A Knoll and BJ Isaac and HA Carr and V Pascucci},
note = {{\copyright} 2016 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
title = {Direct Multifield Volume Ray Casting of Fiber Surfaces},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
year = {2017},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {941--949},
keywords = {Multidimensional Data, Volume Rendering, Isosurface; Isosurfaces, Rendering (computer graphics), Casting, Power capacitors, Aerospace electronics, Acceleration, Transfer functions},
url = {https://eprints.whiterose.ac.uk/103601/},
abstract = {Multifield data are common in visualization. However, reducing these data to comprehensible geometry is a challenging problem. Fiber surfaces, an analogy of isosurfaces to bivariate volume data, are a promising new mechanism for understanding multifield volumes. In this work, we explore direct ray casting of fiber surfaces from volume data without any explicit geometry extraction. We sample directly along rays in domain space, and perform geometric tests in range space where fibers are defined, using a signed distance field derived from the control polygons. Our method requires little preprocessing, and enables real-time exploration of data, dynamic modification and pixel-exact rendering of fiber surfaces, and support for higher-order interpolation in domain space. We demonstrate this approach on several bivariate datasets, including analysis of multi-field combustion data.}
}
@article{wrro100068,
volume = {58},
month = {October},
author = {A Chattopadhyay and H Carr and D Duke and Z Geng and O Saeki},
note = {{\copyright} 2016 Elsevier B.V. This is an author produced version of a paper published in Computational Geometry. Uploaded in accordance with the publisher's self-archiving policy.},
title = {Multivariate Topology Simplification},
publisher = {Elsevier},
year = {2016},
journal = {Computational Geometry},
pages = {1--24},
keywords = {Simplification; Multivariate topology; Reeb space; Reeb skeleton; Multi-dimensional Reeb graph},
url = {https://eprints.whiterose.ac.uk/100068/},
abstract = {Topological simplification of scalar and vector fields is well-established as an effective method for analysing and visualising complex data sets. For multivariate (alternatively, multi-field) data, topological analysis requires simultaneous advances both mathematically and computationally. We propose a robust multivariate topology simplification method based on ``lip''-pruning from the Reeb space. Mathematically, we show that the projection of the Jacobi set of multivariate data into the Reeb space produces a Jacobi structure that separates the Reeb space into simple components. We also show that the dual graph of these components gives rise to a Reeb skeleton that has properties similar to the scalar contour tree and Reeb graph, for topologically simple domains. We then introduce a range measure to give a scaling-invariant total ordering of the components or features that can be used for simplification. Computationally, we show how to compute Jacobi structure, Reeb skeleton, range and geometric measures in the Joint Contour Net (an approximation of the Reeb space) and that these can be used for visualisation similar to the contour tree or Reeb graph.}
}
@misc{wrro106638,
booktitle = {Computer Graphics \& Visual Computing (CGVC) 2016},
month = {September},
title = {Generating Watertight Isosurfaces from 3D Seismic Data},
author = {MS Khan and H Carr and D Angus},
publisher = {Eurographics Association for Computer Graphics},
year = {2016},
journal = {Computer Graphics \& Visual Computing (CGVC) 2016},
keywords = {Computer Graphics, Volume Visualisation, Isosurfaces, Watertight Meshes, Seismic Volumes, Seismic Horizon, Surface Handles},
url = {https://eprints.whiterose.ac.uk/106638/},
abstract = {Seismic data visualisation and analysis is an area of research interest for many commercial and academic disciplines. It enables geoscientists to understand structures underneath the earth. It is an important step in building subsurface geological models for identifying hydrocarbon reservoirs and running geological simulations. Good quality watertight surface meshes are required for constructing these models for accurate identification and extraction of strata/horizons that contain carbon deposits such as fuel and gas. This research demonstrates extracting watertight geometric surfaces from 3D seismic volumes to improve horizon identification and extraction. Isosurfaces and fiber surfaces are proposed for extracting horizons from seismic data. Initial tests with isosurfaces have been conducted, and further experiments using fiber surfaces are underway as the next direction, discussed in Sections 4.5 and 4.6.}
}
@misc{wrro107190,
month = {September},
author = {H Carr and C Sewell and L-T Lo and J Ahrens},
booktitle = {CGVC 2016},
editor = {C Turkay and TR Wan},
title = {Hybrid Data-Parallel Contour Tree Computation},
publisher = {The Eurographics Association},
journal = {Computer Graphics \& Visual Computing},
year = {2016},
keywords = {topological analysis, contour tree, merge tree, data parallel algorithms},
url = {https://eprints.whiterose.ac.uk/107190/},
abstract = {As data sets increase in size beyond the petabyte, it is increasingly important to have automated methods for data analysis and visualisation. While topological analysis tools such as the contour tree and Morse-Smale complex are now well established, there is still a shortage of efficient parallel algorithms for their computation, in particular for massively data-parallel computation on a SIMD model. We report the first data-parallel algorithm for computing the fully augmented contour tree, using a quantised computation model. We then extend this to provide a hybrid data-parallel / distributed algorithm allowing scaling beyond a single GPU or CPU, and provide results for its computation. Our implementation uses the portable data-parallel primitives provided by NVIDIA's Thrust library, allowing us to compile the same code for both GPUs and multi-core CPUs.}
}
@article{wrro88921,
volume = {22},
number = {1},
month = {January},
author = {D Sakurai and O Saeki and H Carr and H-Y Wu and T Yamamoto and D Duke and S Takahashi},
note = {{\copyright} 2015, IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, in any current or future media, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works, for resale or redistribution to servers or lists, or reuse of any copyrighted component of this work in other works.},
title = {Interactive Visualization for Singular Fibers of Functions f: R3 {$\rightarrow$} R2},
publisher = {Institute of Electrical and Electronics Engineers},
year = {2016},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {945--954},
keywords = {singular fibers; fiber topology; mathematical visualization; design study},
url = {https://eprints.whiterose.ac.uk/88921/},
abstract = {Scalar topology in the form of Morse theory has provided computational tools that analyze and visualize data from scientific and engineering tasks. Contracting isocontours to single points encapsulates variations in isocontour connectivity in the Reeb graph. For multivariate data, isocontours generalize to fibers{--}inverse images of points in the range, and this area is therefore known as fiber topology. However, fiber topology is less fully developed than Morse theory, and current efforts rely on manual visualizations.
This paper presents how to accelerate and semi-automate this task through an interface for visualizing fiber singularities of multivariate functions R3 {$\rightarrow$} R2. This interface exploits existing conventions of fiber topology, but also introduces a 3D view based on the extension of Reeb graphs to Reeb spaces. Using the Joint Contour Net, a quantized approximation of the Reeb space, this accelerates topological visualization and permits online perturbation to reduce or remove degeneracies in functions under study. Validation of the interface is performed by assessing whether the interface supports the mathematical workflow both of experts and of less experienced mathematicians.}
}
@article{wrro86871,
volume = {34},
number = {3},
month = {June},
author = {HA Carr and Z Geng and J Tierny and A Chattopadhyay and A Knoll},
note = {{\copyright} 2015 The Author(s) Computer Graphics Forum {\copyright} 2015 The Eurographics Association and John Wiley \& Sons Ltd. Published by John Wiley \& Sons Ltd. This is the peer reviewed version of the following article: Carr, H., Geng, Z., Tierny, J., Chattopadhyay, A. and Knoll, A. (2015), Fiber Surfaces: Generalizing Isosurfaces to Bivariate Data. Computer Graphics Forum, 34: 241--250. doi: 10.1111/cgf.12636, which has been published in final form at http://dx.doi.org/10.1111/cgf.12636. This article may be used for non-commercial purposes in accordance with Wiley Terms and Conditions for Self-Archiving.},
title = {Fiber surfaces: generalizing isosurfaces to bivariate data},
publisher = {Wiley},
year = {2015},
journal = {Computer Graphics Forum},
pages = {241--250},
url = {https://eprints.whiterose.ac.uk/86871/},
abstract = {Scientific visualization has many effective methods for examining and exploring scalar and vector fields, but rather fewer for bivariate fields. We report the first general purpose approach for the interactive extraction of geometric separating surfaces in bivariate fields. This method is based on fiber surfaces: surfaces constructed from sets of fibers, the multivariate analogues of isolines. We show simple methods for fiber surface definition and extraction. In particular, we show a simple and efficient fiber surface extraction algorithm based on Marching Cubes. We also show how to construct fiber surfaces interactively with geometric primitives in the range of the function. We then extend this to build user interfaces that generate parameterized families of fiber surfaces with respect to arbitrary polygons. In the special case of isovalue-gradient plots, fiber surfaces capture features geometrically for quantitative analysis that have previously only been analysed visually and qualitatively using multi-dimensional transfer functions in volume rendering. We also demonstrate fiber surface extraction on a variety of bivariate data.}
}
@article{wrro97575,
volume = {104},
month = {June},
author = {T Hinks and H Carr and H Gharibi and DF Laefer},
note = {{\copyright} 2015 International Society for Photogrammetry and Remote Sensing, Inc. (ISPRS). Published by Elsevier B.V. This is an author produced version of a paper published in ISPRS Journal of Photogrammetry and Remote Sensing. Uploaded in accordance with the publisher's self-archiving policy.},
title = {Visualisation of urban airborne laser scanning data with occlusion images},
publisher = {Elsevier},
year = {2015},
journal = {ISPRS Journal of Photogrammetry and Remote Sensing},
pages = {77--87},
keywords = {Airborne laser scanning; LiDAR; Ambient occlusion; Urban modelling; Elevation image; Visualisation},
url = {https://eprints.whiterose.ac.uk/97575/},
abstract = {Airborne Laser Scanning (ALS) was introduced to provide rapid, high resolution scans of landforms for computational processing. More recently, ALS has been adapted for scanning urban areas. The greater complexity of urban scenes necessitates the development of novel methods to exploit urban ALS to best advantage. This paper presents occlusion images: a novel technique that exploits the geometric complexity of the urban environment to improve visualisation of small details for better feature recognition. The algorithm is based on an inversion of traditional occlusion techniques.}
}
@article{wrro84783,
volume = {91},
number = {3},
month = {March},
author = {N Schunck and DJ Duke and H Carr},
note = {{\copyright} 2015, American Physical Society. Reproduced in accordance with the publisher's self-archiving policy.},
title = {Description of induced nuclear fission with Skyrme energy functionals. II. Finite temperature effects},
publisher = {American Physical Society},
year = {2015},
journal = {Physical Review C: Nuclear Physics},
keywords = {Fission; Topology; Joint Contour Net},
url = {https://eprints.whiterose.ac.uk/84783/},
abstract = {Understanding the mechanisms of induced nuclear fission for a broad range of neutron energies could help resolve fundamental science issues, such as the formation of elements in the universe, but could also have a large impact on societal applications in energy production or nuclear waste management. The goal of this paper is to set up the foundations of a microscopic theory to study the static aspects of induced fission as a function of the excitation energy of the incident neutron, from thermal to fast neutrons. To account for the high excitation energy of the compound nucleus, we employ a statistical approach based on finite temperature nuclear density functional theory with Skyrme energy densities, which we benchmark on the Pu239(n,f) reaction. We compute the evolution of the least-energy fission pathway across multidimensional potential energy surfaces with up to five collective variables as a function of the nuclear temperature and predict the evolution of both the inner and the outer fission barriers as a function of the excitation energy of the compound nucleus. We show that the coupling to the continuum induced by the finite temperature is negligible in the range of neutron energies relevant for many applications of neutron-induced fission. We prove that the concept of quantum localization introduced recently can be extended to T{\ensuremath{>}}0, and we apply the method to study the interaction energy and total kinetic energy of fission fragments as a function of the temperature for the most probable fission. While large uncertainties in theoretical modeling remain, we conclude that a finite temperature nuclear density functional may provide a useful framework to obtain accurate predictions of fission fragment properties.}
}
Eighty years after its experimental discovery, a description of induced nuclear fission based solely on the interactions between neutrons and protons and quantum many-body methods still poses formidable challenges. The goal of this paper is to contribute to the development of a predictive microscopic framework for the accurate calculation of static properties of fission fragments for hot fission and thermal or slow neutrons. To this end, we focus on the Pu239(n,f) reaction and employ nuclear density functional theory with Skyrme energy densities. Potential energy surfaces are computed at the Hartree-Fock-Bogoliubov approximation with up to five collective variables. We find that the triaxial degree of freedom plays an important role, both near the fission barrier and at scission. The impact of the parametrization of the Skyrme energy density and the role of pairing correlations on deformation properties from the ground state up to scission are also quantified. We introduce a general template for the quantitative description of fission fragment properties. It is based on the careful analysis of scission configurations, using both advanced topological methods and recently proposed quantum many-body techniques. We conclude that an accurate prediction of fission fragment properties at low incident neutron energies, although technologically demanding, should be within the reach of current nuclear density functional theory.
@article{wrro81690,
volume = {90},
number = {5},
month = {November},
author = {N Schunck and DJ Duke and H Carr and A Knoll},
note = {{\copyright} 2014, American Physical Society. Reproduced in accordance with the publisher's self-archiving policy.},
title = {Description of induced nuclear fission with Skyrme energy functionals: static potential energy surfaces and fission fragment properties},
publisher = {American Physical Society},
year = {2014},
journal = {Physical Review C: Nuclear Physics},
url = {https://eprints.whiterose.ac.uk/81690/},
abstract = {Eighty years after its experimental discovery, a description of induced nuclear fission based solely on the interactions between neutrons and protons and quantum many-body methods still poses formidable challenges. The goal of this paper is to contribute to the development of a predictive microscopic framework for the accurate calculation of static properties of fission fragments for hot fission and thermal or slow neutrons. To this end, we focus on the Pu239(n,f) reaction and employ nuclear density functional theory with Skyrme energy densities. Potential energy surfaces are computed at the Hartree-Fock-Bogoliubov approximation with up to five collective variables. We find that the triaxial degree of freedom plays an important role, both near the fission barrier and at scission. The impact of the parametrization of the Skyrme energy density and the role of pairing correlations on deformation properties from the ground state up to scission are also quantified. We introduce a general template for the quantitative description of fission fragment properties. It is based on the careful analysis of scission configurations, using both advanced topological methods and recently proposed quantum many-body techniques. We conclude that an accurate prediction of fission fragment properties at low incident neutron energies, although technologically demanding, should be within the reach of current nuclear density functional theory.}
}
Evaluation, solved and unsolved problems, and future directions are popular themes pervading the visualization community over the last decade. The top unsolved problem in both scientific and information visualization was the subject of an IEEE Visualization Conference panel in 2004. The future of graphics hardware was another important topic of discussion the same year. The subject of how to evaluate visualization returned a few years later. Chris Johnson published a list of 10 top problems in scientific visualization research. This was followed up by a report of both past achievements and future challenges in visualization research, as well as financial support recommendations to the National Science Foundation (NSF) and National Institutes of Health (NIH). Chen recently published the first list of top unsolved information visualization problems. Future research directions of topology-based visualization were also a major theme of a workshop on topology-based methods. Laramee and Kosara published a list of top future challenges in human-centered visualization.
@incollection{wrro144593,
volume = {37},
month = {September},
author = {RS Laramee and H Carr and M Chen and H Hauser and L Linsen and K Mueller and V Natarajan and H Obermaier and R Peikert and E Zhang},
series = {Mathematics and Visualization},
note = {{\copyright} Springer-Verlag London 2014. This is a post-peer-review, pre-copyedited version of book chapter published in Scientific Visualization. The final authenticated version is available online at: https://doi.org/10.1007/978-1-4471-6497-5\_19},
title = {Future Challenges and Unsolved Problems in Multi-field Visualization},
publisher = {Springer, London},
year = {2014},
journal = {Mathematics and Visualization},
pages = {205--211},
keywords = {Tensor Field; Graphic Hardware; Display Primary; Scientific Visualization; Visual Metaphor},
url = {https://eprints.whiterose.ac.uk/144593/},
abstract = {Evaluation, solved and unsolved problems, and future directions are popular themes pervading the visualization community over the last decade. The top unsolved problem in both scientific and information visualization was the subject of an IEEE Visualization Conference panel in 2004. The future of graphics hardware was another important topic of discussion the same year. The subject of how to evaluate visualization returned a few years later. Chris Johnson published a list of 10 top problems in scientific visualization research. This was followed up by a report of both past achievements and future challenges in visualization research, as well as financial support recommendations to the National Science Foundation (NSF) and National Institutes of Health (NIH). Chen recently published the first list of top unsolved information visualization problems. Future research directions of topology-based visualization were also a major theme of a workshop on topology-based methods. Laramee and Kosara published a list of top future challenges in human-centered visualization.}
}
Codes for computational science and downstream analysis (visualization and/or statistical modelling) have historically been dominated by imperative thinking, but this situation is evolving, both through adoption of higher-level tools such as Matlab, and through some adoption of functional ideas in the next generation of toolkits being driven by the vision of extreme-scale computing. However, this is still a long way from seeing a functional language like Haskell used in a live application. This paper makes three contributions to functional programming in computational science. First, we describe how use of Haskell was interleaved in the development of the first practical approach to multifield topology, and its application to the analysis of data from nuclear simulations that has led to new insight into fission. Second, we report subsequent developments of the functional code (i) improving sequential performance to approach that of an imperative implementation, and (ii) the introduction of parallelism through four skeletons exhibiting good scaling and different time/space trade-offs. Finally we consider the broader question of how, where, and why functional programming may - or may not - find further use in computational science.
@misc{wrro79906,
month = {September},
author = {DJ Duke and F Hosseini and H Carr},
booktitle = {The 3rd ACM SIGPLAN Workshop on Functional High-Performance Computing},
editor = {M Sheeran and R Newton},
title = {Parallel Computation of Multifield Topology: Experience of Haskell in a Computational Science Application},
publisher = {ACM Press},
year = {2014},
journal = {Proceedings of the ACM Workshop on Functional High-Performance Computing},
pages = {11--21},
keywords = {Computational topology; joint contour net; Haskell; performance},
url = {https://eprints.whiterose.ac.uk/79906/},
abstract = {Codes for computational science and downstream analysis (visualization and/or statistical modelling) have historically been dominated by imperative thinking, but this situation is evolving, both through adoption of higher-level tools such as Matlab, and through some adoption of functional ideas in the next generation of toolkits being driven by the vision of extreme-scale computing. However, this is still a long way from seeing a functional language like Haskell used in a live application. This paper makes three contributions to functional programming in computational science. First, we describe how use of Haskell was interleaved in the development of the first practical approach to multifield topology, and its application to the analysis of data from nuclear simulations that has led to new insight into fission. Second, we report subsequent developments of the functional code (i) improving sequential performance to approach that of an imperative implementation, and (ii) the introduction of parallelism through four skeletons exhibiting good scaling and different time/space trade-offs. Finally we consider the broader question of how, where, and why functional programming may - or may not - find further use in computational science.}
}
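The skeletons reported in the paper are Haskell constructs; purely to show the shape of the approach, and in Python for consistency with the other sketches in this list, the simplest skeleton (a parallel map over independent sub-domains) looks roughly like this, with the chunking of the domain an assumption:

    from multiprocessing import Pool

    def par_map(f, chunks, workers=4):
        # apply the per-chunk analysis f (a picklable, top-level function)
        # to each sub-domain in parallel; the time/space trade-offs the
        # paper reports come from how the domain is chunked and combined
        with Pool(workers) as pool:
            return pool.map(f, chunks)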
The orientation of fibers in assemblies such as nonwovens has a major influence on the anisotropy of properties of the bulk structure and is strongly influenced by the processes used to manufacture the fabric. To build a detailed understanding of a fabric's geometry and architecture, it is important that fiber orientation in three dimensions is evaluated, since out-of-plane orientations may also contribute to the physical properties of the fabric. In this study, a technique for measuring fiber segment orientation as proposed by Eberhardt and Clarke is implemented and experimentally studied based on analysis of X-ray computed microtomographic data. Fiber segment orientation distributions were extracted from volumetric X-ray microtomography data sets of hydroentangled nonwoven fabrics manufactured from parallel-laid, cross-laid, and air-laid webs. Spherical coordinates represented the orientation of individual fibers. Physical testing of the samples by means of zero-span tensile testing and z-directional tensile testing was employed to compare with the computed results.
@article{wrro83459,
volume = {20},
number = {4},
month = {August},
author = {M Tausif and B Duffy and H Carr and S Grishanov and SJ Russell},
note = {{\copyright} Microscopy Society of America 2014. This is an author produced version of a paper published in Microscopy and Microanalysis. Uploaded in accordance with the publisher's self-archiving policy.},
title = {Three-Dimensional Fiber Segment Orientation Distribution Using X-Ray Microtomography},
publisher = {Cambridge University Press},
year = {2014},
journal = {Microscopy and Microanalysis},
pages = {1294--1303},
keywords = {Orientation distribution; Fiber; Nonwovens; Three dimensional; X-ray microtomography; Structure; Hydroentanglement},
url = {https://eprints.whiterose.ac.uk/83459/},
abstract = {The orientation of fibers in assemblies such as nonwovens has a major influence on the anisotropy of properties of the bulk structure and is strongly influenced by the processes used to manufacture the fabric. To build a detailed understanding of a fabric's geometry and architecture, it is important that fiber orientation in three dimensions is evaluated, since out-of-plane orientations may also contribute to the physical properties of the fabric. In this study, a technique for measuring fiber segment orientation as proposed by Eberhardt and Clarke is implemented and experimentally studied based on analysis of X-ray computed microtomographic data. Fiber segment orientation distributions were extracted from volumetric X-ray microtomography data sets of hydroentangled nonwoven fabrics manufactured from parallel-laid, cross-laid, and air-laid webs. Spherical coordinates represented the orientation of individual fibers. Physical testing of the samples by means of zero-span tensile testing and z-directional tensile testing was employed to compare with the computed results.}
}
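The spherical-coordinate step can be sketched directly. A minimal Python version, assuming fibre segments arrive as direction vectors and treating fibres as undirected; the bin counts and angle ranges are illustrative choices:

    import numpy as np

    def orientation_distribution(segments, n_bins=18):
        # segments: (N, 3) array of fibre segment direction vectors
        v = segments / np.linalg.norm(segments, axis=1, keepdims=True)
        v[v[:, 2] < 0] *= -1                        # fibres are undirected
        theta = np.degrees(np.arccos(v[:, 2]))      # out-of-plane (polar) angle
        phi = np.degrees(np.arctan2(v[:, 1], v[:, 0])) % 180.0  # in-plane angle
        hist, _, _ = np.histogram2d(theta, phi, bins=n_bins,
                                    range=[[0.0, 90.0], [0.0, 180.0]])
        return hist / hist.sum()                    # normalised joint distribution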
This paper presents the fundamental mathematics to determine the minimum crack width detectable with a terrestrial laser scanner in unit-based masonry. Orthogonal offset, interval scan angle, crack orientation, and crack depth are the main parameters. The theoretical work is benchmarked against laboratory tests using 4 samples with predesigned crack widths of 1-7 mm scanned at orthogonal distances of 5.0-12.5 m and at angles of 0°-30°. Results showed that absolute errors of crack width were mostly less than 1.37 mm when the orthogonal distance varied from 5.0 to 7.5 m but significantly increased for greater distances. The orthogonal distance had a disproportionately negative effect compared to the scan angle.
@article{wrro79316,
volume = {62},
month = {March},
author = {DF Laefer and L Truong-Hong and H Carr and M Singh},
note = {{\copyright} 2014, Elsevier. NOTICE: this is the author's version of a work that was accepted for publication in NDT and E International. Changes resulting from the publishing process, such as peer review, editing, corrections, structural formatting, and other quality control mechanisms may not be reflected in this document. Changes may have been made to this work since it was submitted for publication. A definitive version was subsequently published in NDT and E International, 62, 2014, 10.1016/j.ndteint.2013.11.001},
title = {Crack detection limits in unit based masonry with terrestrial laser scanning},
publisher = {Elsevier},
year = {2014},
journal = {NDT and E International},
pages = {66--76},
keywords = {Terrestrial laser scanning; Point cloud data; Crack detection; Structural health monitoring; Condition assessment; Masonry},
url = {https://eprints.whiterose.ac.uk/79316/},
abstract = {This paper presents the fundamental mathematics to determine the minimum crack width detectable with a terrestrial laser scanner in unit-based masonry. Orthogonal offset, interval scan angle, crack orientation, and crack depth are the main parameters. The theoretical work is benchmarked against laboratory tests using 4 samples with predesigned crack widths of 1-7 mm scanned at orthogonal distances of 5.0-12.5 m and at angles of 0°-30°. Results showed that absolute errors of crack width were mostly less than 1.37 mm when the orthogonal distance varied from 5.0 to 7.5 m but significantly increased for greater distances. The orthogonal distance had a disproportionately negative effect compared to the scan angle.}
}
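As back-of-envelope geometry only (not the paper's derivation, which also accounts for crack orientation and depth), the point spacing on a flat wall at orthogonal offset d and angular step Δθ grows with the inverse square of the cosine of the scan angle, which already suggests why offset dominates. A Python sketch with an assumed detectability test:

    import math

    def wall_point_spacing(d, dtheta, theta=0.0):
        # position along the wall is x = d*tan(theta), so the spacing for an
        # angular step dtheta is dx = d*dtheta/cos^2(theta)
        return d * dtheta / math.cos(theta) ** 2

    def crack_resolvable(width_mm, d_m, dtheta, theta=0.0):
        # crude criterion: a crack is resolvable if wider than the spacing
        return width_mm > 1000.0 * wall_point_spacing(d_m, dtheta, theta)

For example, at d = 5 m and a 0.2 mrad step, the spacing at normal incidence is about 1 mm, so a 2 mm crack passes this deliberately crude test.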
As with individual fields, one approach to visualizing multifields is to analyze the field and identify features. While some work has been carried out in detecting features in multifields, any discussion of multifield analysis must also identify techniques from single fields that can be extended appropriately.
@incollection{wrro97576,
author = {H Carr},
series = {Mathematics and Visualization},
booktitle = {Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization},
editor = {CD Hansen and M Chen and CR Johnson and AE Kaufman and H Hagen},
title = {Feature analysis in multifields},
address = {London},
publisher = {Springer-Verlag},
year = {2014},
journal = {Mathematics and Visualization},
pages = {197--204},
url = {https://eprints.whiterose.ac.uk/97576/},
abstract = {As with individual fields, one approach to visualizing multifields is to analyze the field and identify features. While some work has been carried out in detecting features in multifields, any discussion of multifield analysis must also identify techniques from single fields that can be extended appropriately.}
}
Computational topology is of interest in visualization because it summarizes useful global properties of a dataset. The greatest need for such abstractions is in massive data, and to date most implementations have opted for low-level languages to obtain space and time-efficient implementations. Such code is complex, and is becoming even more so with the need to operate efficiently on a range of parallel hardware. Motivated by rapid advances in functional programming and compiler technology, this chapter investigates whether a shift in programming paradigm could reduce the complexity of the task. Focusing on contour tree generation as a case study, the chapter makes three contributions. First, it sets out the development of a concise functional implementation of the algorithm. Second, it shows that the sequential functional code can be tuned to match the performance of an imperative implementation, albeit at some cost in code clarity. Third, it outlines new possibilities for parallelisation using functional tools, and notes similarities between functional abstractions and emerging ideas in extreme-scale visualization.
@incollection{wrro81914,
booktitle = {Topology-Based Methods in Visualization III},
editor = {P-T Bremer and I Hotz and V Pascucci and R Peikert},
title = {Computational topology via functional programming: a baseline analysis},
author = {DJ Duke and H Carr},
publisher = {Springer},
year = {2014},
pages = {73--88},
url = {https://eprints.whiterose.ac.uk/81914/},
abstract = {Computational topology is of interest in visualization because it summarizes useful global properties of a dataset. The greatest need for such abstractions is in massive data, and to date most implementations have opted for low-level languages to obtain space and time-efficient implementations. Such code is complex, and is becoming even more so with the need to operate efficiently on a range of parallel hardware. Motivated by rapid advances in functional programming and compiler technology, this chapter investigates whether a shift in programming paradigm could reduce the complexity of the task. Focusing on contour tree generation as a case study, the chapter makes three contributions. First, it sets out the development of a concise functional implementation of the algorithm. Second, it shows that the sequential functional code can be tuned to match the performance of an imperative implementation, albeit at some cost in code clarity. Third, it outlines new possibilities for parallelisation using functional tools, and notes similarities between functional abstractions and emerging ideas in extreme-scale visualization.}
}
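For comparison, the imperative baseline is compact when stated directly. Below is a minimal Python version of the standard sweep-and-merge construction of the join tree (one half of the contour tree computation); the adjacency-dict input and consistent tie-breaking are assumptions:

    def join_tree(values, neighbours):
        # values: vertex -> scalar; neighbours: vertex -> iterable of vertices
        parent, lowest, arcs = {}, {}, []

        def find(v):                           # union-find with path halving
            while parent[v] != v:
                parent[v] = parent[parent[v]]
                v = parent[v]
            return v

        # sweep vertices from high to low value, merging components of
        # already-seen neighbours; each merge contributes one arc
        for v in sorted(values, key=values.get, reverse=True):
            parent[v] = lowest[v] = v
            for u in neighbours[v]:
                if u in parent:                # u already swept
                    ru, rv = find(u), find(v)
                    if ru != rv:
                        arcs.append((lowest[ru], v))  # branch ends at v
                        parent[ru] = rv
            lowest[find(v)] = v                # v is its component's lowest so far
        return arcs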
Multifield visualization covers a range of data types that can be visualized with many different techniques. We summarize both the data types and the categories of techniques, and lay out the reasoning for dividing this Part into chapters by technique rather than by data type. As we have seen in the previous chapter, multifield visualization covers a broad range of types of data. It is therefore possible to discuss multifield visualization according to these data types, with each type covered in a separate chapter. However, it is also possible to approach the question by considering the techniques to be applied, many of which can be applied to multiple types of multifield data. In this chapter, we therefore discuss both ways of analysing multifield visualization techniques, and why we have chosen to proceed according to technique rather than type in the subsequent chapters.
@incollection{wrro97577,
author = {H Hauser and H Carr},
series = {Mathematics and Visualization},
booktitle = {Scientific Visualization: Uncertainty, Multifield, Biomedical, and Scalable Visualization},
editor = {CD Hansen and M Chen and CR Johnson and AE Kaufman and H Hagen},
title = {Categorization},
address = {London},
publisher = {Springer-Verlag},
year = {2014},
journal = {Mathematics and Visualization},
pages = {111--117},
url = {https://eprints.whiterose.ac.uk/97577/},
abstract = {Multifield visualization covers a range of data types that can be visualized with many different techniques. We summarize both the data types and the categories of techniques, and lay out the reasoning for dividing this Part into chapters by technique rather than by data type. As we have seen in the previous chapter, multifield visualization covers a broad range of types of data. It is therefore possible to discuss multifield visualization according to these data types, with each type covered in a separate chapter. However, it is also possible to approach the question by considering the techniques to be applied, many of which can be applied to multiple types of multifield data. In this chapter, we therefore discuss both ways of analysing multifield visualization techniques, and why we have chosen to proceed according to technique rather than type in the subsequent chapters.}
}
How can the notion of topological structures for single scalar fields be extended to multifields? In this paper we propose a definition for such structures using the concepts of Pareto optimality and Pareto dominance. Given a set of piecewise-linear, scalar functions over a common simplicial complex of any dimension, our method finds regions of "consensus" among single fields' critical points and their connectivity relations. We show that our concepts are useful to data analysis on real-world examples originating from fluid-flow simulations; in two cases where the consensus of multiple scalar vortex predictors is of interest and in another case where one predictor is studied under different simulation parameters. We also compare the properties of our approach with current alternatives.
@article{wrro79280,
volume = {32},
number = {3 Pt 3},
month = {June},
author = {L Huettenberger and C Heine and H Carr and G Scheuermann and C Garth},
title = {Towards multifield scalar topology based on pareto optimality},
publisher = {Wiley},
year = {2013},
journal = {Computer Graphics Forum},
pages = {341--350},
keywords = {Computer graphics; computational geometry and object modeling; geometric algorithms, languages, and systems},
url = {https://eprints.whiterose.ac.uk/79280/},
abstract = {How can the notion of topological structures for single scalar fields be extended to multifields? In this paper we propose a definition for such structures using the concepts of Pareto optimality and Pareto dominance. Given a set of piecewise-linear, scalar functions over a common simplicial complex of any dimension, our method finds regions of "consensus" among single fields' critical points and their connectivity relations. We show that our concepts are useful to data analysis on real-world examples originating from fluid-flow simulations; in two cases where the consensus of multiple scalar vortex predictors is of interest and in another case where one predictor is studied under different simulation parameters. We also compare the properties of our approach with current alternatives.}
}
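The dominance test at the heart of the definition is easy to state. A minimal Python sketch, assuming maximisation in every field and ignoring the restriction to local neighbourhoods that the paper's structures rely on:

    def dominates(a, b):
        # a, b: tuples of the field values at two points; a dominates b if it
        # is no worse in every field and strictly better in at least one
        return (all(x >= y for x, y in zip(a, b))
                and any(x > y for x, y in zip(a, b)))

    def pareto_optimal(points):
        # points not dominated by any other point (quadratic, sketch only)
        return [p for p in points
                if not any(dominates(q, p) for q in points if q is not p)]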
Many data sets are sampled on regular lattices in two, three or more dimensions, and recent work has shown that statistical properties of these data sets must take into account the continuity of the underlying physical phenomena. However, the effects of quantization on the statistics have not yet been accounted for. This paper therefore reconciles the previous papers to the underlying mathematical theory, develops a mathematical model of quantized statistics of continuous functions, and proves convergence of geometric approximations to continuous statistics for regular sampling lattices. In addition, the computational cost of various approaches is considered, and recommendations made about when to use each type of statistic.
@article{wrro79281,
volume = {19},
number = {2},
month = {February},
author = {B Duffy and HA Carr and T M{\"o}ller},
note = {{\copyright} 2013 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
title = {Integrating isosurface statistics and histograms},
publisher = {Institute of Electrical and Electronics Engineers (IEEE)},
year = {2013},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {263--277},
keywords = {Frequency distribution; geometric statistics; histograms; integration},
url = {https://eprints.whiterose.ac.uk/79281/},
abstract = {Many data sets are sampled on regular lattices in two, three or more dimensions, and recent work has shown that statistical properties of these data sets must take into account the continuity of the underlying physical phenomena. However, the effects of quantization on the statistics have not yet been accounted for. This paper therefore reconciles the previous papers to the underlying mathematical theory, develops a mathematical model of quantized statistics of continuous functions, and proves convergence of geometric approximations to continuous statistics for regular sampling lattices. In addition, the computational cost of various approaches is considered, and recommendations made about when to use each type of statistic.}
}
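The continuous view can be made concrete via the coarea formula: weighting each sample by its gradient magnitude turns an ordinary histogram into an approximation of the isosurface-area distribution, whereas the unweighted count histogram approximates the volume distribution. A minimal Python sketch for a uniformly sampled 3D grid (unit spacing assumed):

    import numpy as np

    def area_weighted_histogram(field, n_bins=64):
        # derivatives along each grid axis; magnitude is order-independent
        g0, g1, g2 = np.gradient(field)
        gmag = np.sqrt(g0**2 + g1**2 + g2**2)
        # each sample contributes weight |grad f|, approximating area per bin
        return np.histogram(field.ravel(), bins=n_bins, weights=gmag.ravel())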
Contour trees and Reeb graphs are firmly embedded in scientific visualization for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net and demonstrate that Contour Trees for individual variables can be extracted from the Joint Contour Net.
@misc{wrro79239,
booktitle = {2013 IEEE Pacific Visualization Symposium},
title = {Joint contour nets: computation and properties},
author = {H Carr and D Duke},
publisher = {IEEE},
year = {2013},
pages = {161--168},
journal = {2013 IEEE Pacific Visualization Symposium (PacificVis)},
keywords = {Computational topology; Contour analysis; contour tree; Joint Contour Net; Multivariate; Reeb graph; Reeb space},
url = {https://eprints.whiterose.ac.uk/79239/},
abstract = {Contour trees and Reeb graphs are firmly embedded in scientific visualization for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net and demonstrate that Contour Trees for individual variables can be extracted from the Joint Contour Net.}
}
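Greatly simplified, the construction can be sketched for two fields on a regular 2D grid: quantise each field, take connected runs of cells sharing the same quantised pair as nodes ("joint slabs"), and link slabs that touch. The real algorithm works on fragments of simplices rather than whole grid cells; the grid shortcut and the scipy dependency below are assumptions:

    import numpy as np
    from scipy.ndimage import label

    def joint_contour_net(f, g, n_levels=8):
        # f, g: 2D arrays sampling the two fields on the same grid
        qf = np.digitize(f, np.linspace(f.min(), f.max(), n_levels))
        qg = np.digitize(g, np.linspace(g.min(), g.max(), n_levels))
        joint = qf * (n_levels + 2) + qg        # unique id per quantised pair
        slabs = np.zeros(joint.shape, dtype=int)
        next_id = 0
        for value in np.unique(joint):
            comp, n = label(joint == value)     # connected slabs of this pair
            slabs[comp > 0] = comp[comp > 0] + next_id
            next_id += n
        edges = set()                           # slabs sharing a cell boundary
        for a, b in ((slabs[:, :-1], slabs[:, 1:]), (slabs[:-1, :], slabs[1:, :])):
            edges |= {tuple(sorted(p)) for p in zip(a.ravel(), b.ravel()) if p[0] != p[1]}
        return slabs, edges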
Contour Trees and Reeb Graphs are firmly embedded in scientific visualisation for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net, and demonstrate some of the properties that make it practically useful for visualisation, including accelerating computation by exploiting a relationship with rasterisation in the range of the function.
@article{wrro79282,
title = {Joint contour nets},
author = {DJ Duke and H Carr},
publisher = {Institute of Electrical and Electronics Engineers},
year = {2013},
note = {{\copyright} 2013 IEEE. Personal use of this material is permitted. Permission from IEEE must be obtained for all other uses, including reprinting/republishing this material for advertising or promotional purposes, creating new collective works for resale or redistribution to servers or lists, or reuse of any copyrighted components of this work in other works.},
journal = {IEEE Transactions on Visualization and Computer Graphics},
keywords = {computational topology; contour tree; reeb graph; multivariate; contour analysis; reeb space; joint contour net},
url = {https://eprints.whiterose.ac.uk/79282/},
abstract = {Contour Trees and Reeb Graphs are firmly embedded in scientific visualisation for analysing univariate (scalar) fields. We generalize this analysis to multivariate fields with a data structure called the Joint Contour Net that quantizes the variation of multiple variables simultaneously. We report the first algorithm for constructing the Joint Contour Net, and demonstrate some of the properties that make it practically useful for visualisation, including accelerating computation by exploiting a relationship with rasterisation in the range of the function.}
}
Traditional documentation capabilities of laser scanning technology can be further exploited for urban modeling through the transformation of resulting point clouds into solid models compatible with computational analysis. This article introduces such a technique through the combination of an angle criterion and voxelization. As part of that, a k-nearest neighbor (kNN) searching algorithm is implemented using a predefined number of kNN points combined with a maximum radius of the neighborhood, something not previously implemented. From this sample, points are categorized as boundary or interior points based on an angle criterion. Façade features are determined based on underlying vertical and horizontal grid voxels of the feature boundaries by a grid clustering technique. The complete building model involving all full voxels is generated by employing the Flying Voxel method to relabel voxels that are inside openings or outside the façade as empty voxels. Experimental results on three different buildings, using four distinct sampling densities, showed successful detection of all openings, reconstruction of all building façades, and automatic filling of all improper holes. The maximum nodal displacement divergence was 1.6\% compared to manually generated meshes from measured drawings. This fully automated approach rivals processing times of other techniques with the distinct advantage of extracting more boundary points, especially in less dense data sets ({\ensuremath{<}}175 points/m2), which may enable its more rapid exploitation of aerial laser scanning data and ultimately preclude needing a priori knowledge.
@article{wrro79317,
volume = {28},
number = {2},
author = {L Truong-Hong and DF Laefer and T Hinks and H Carr},
note = {{\copyright} 2013, Wiley. This is the accepted version of the following article: Truong-Hong, L, Laefer, DF, Hinks, T and Carr, H (2013) Combining an angle criterion with voxelization and the flying voxel method in reconstructing building models from LiDAR data. Computer-Aided Civil and Infrastructure Engineering, 28 (2). 112 - 129. ISSN 1093-9687, which has been published in final form at http://dx.doi.org/10.1111/j.1467-8667.2012.00761.x},
title = {Combining an angle criterion with voxelization and the flying voxel method in reconstructing building models from LiDAR data},
publisher = {Wiley},
year = {2013},
journal = {Computer-Aided Civil and Infrastructure Engineering},
pages = {112--129},
url = {https://eprints.whiterose.ac.uk/79317/},
abstract = {Traditional documentation capabilities of laser scanning technology can be further exploited for urban modeling through the transformation of resulting point clouds into solid models compatible with computational analysis. This article introduces such a technique through the combination of an angle criterion and voxelization. As part of that, a k-nearest neighbor (kNN) searching algorithm is implemented using a predefined number of kNN points combined with a maximum radius of the neighborhood, something not previously implemented. From this sample, points are categorized as boundary or interior points based on an angle criterion. Fa{\c c}ade features are determined based on underlying vertical and horizontal grid voxels of the feature boundaries by a grid clustering technique. The complete building model involving all full voxels is generated by employing the Flying Voxel method to relabel voxels that are inside openings or outside the fa{\c c}ade as empty voxels. Experimental results on three different buildings, using four distinct sampling densities, showed successful detection of all openings, reconstruction of all building fa{\c c}ades, and automatic filling of all improper holes. The maximum nodal displacement divergence was 1.6\% compared to manually generated meshes from measured drawings. This fully automated approach rivals processing times of other techniques with the distinct advantage of extracting more boundary points, especially in less dense data sets ({\ensuremath{<}}175 points/m2), which may enable its more rapid exploitation of aerial laser scanning data and ultimately preclude needing a priori knowledge.}
}
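The angle criterion itself is simple to sketch: gather the k nearest neighbours within a maximum radius, sort them by bearing around the query point in the facade plane, and flag the point as boundary if the largest angular gap exceeds a threshold. In the Python sketch below, k, the radius, and the 90-degree gap are illustrative choices, and the voxelization and Flying Voxel stages are omitted:

    import numpy as np
    from scipy.spatial import cKDTree

    def boundary_points(points2d, k=8, max_radius=0.2, gap_deg=90.0):
        # points2d: (N, 2) coordinates in the facade plane
        tree = cKDTree(points2d)
        flags = np.zeros(len(points2d), dtype=bool)
        for i, p in enumerate(points2d):
            dists, idx = tree.query(p, k=k + 1)
            nbrs = [j for d, j in zip(dists, idx) if j != i and d <= max_radius]
            if len(nbrs) < 2:
                flags[i] = True                # isolated: treat as boundary
                continue
            d = points2d[nbrs] - p
            bearings = np.sort(np.arctan2(d[:, 1], d[:, 0]))
            # angular gaps between consecutive neighbours, including wrap-around
            gaps = np.diff(np.append(bearings, bearings[0] + 2 * np.pi))
            flags[i] = np.degrees(gaps.max()) > gap_deg
        return flags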
In nuclear science, density functional theory (DFT) is a powerful tool to model the complex interactions within the atomic nucleus, and is the primary theoretical approach used by physicists seeking a better understanding of fission. However, DFT simulations result in complex multivariate datasets in which it is difficult to locate the crucial `scission' point at which one nucleus fragments into two, and to identify the precursors to scission. The Joint Contour Net (JCN) has recently been proposed as a new data structure for the topological analysis of multivariate scalar fields, analogous to the contour tree for univariate fields. This paper reports the analysis of DFT simulations using the JCN, the first application of the JCN technique to real data. It makes three contributions to visualization: (i) a set of practical methods for visualizing the JCN, (ii) new insight into the detection of nuclear scission, and (iii) an analysis of aesthetic criteria to drive further work on representing the JCN.
@article{wrro77400,
volume = {18},
number = {12},
month = {December},
author = {DJ Duke and H Carr and A Knoll and N Schunck and HA Nam and A Staszczak},
note = {{\copyright} 2012, IEEE. This is an author produced version of a paper published in IEEE Transactions on Visualization and Computer Graphics. Uploaded with permission from the publisher.},
title = {Visualizing nuclear scission through a multifield extension of topological analysis},
publisher = {Institute of Electrical and Electronics Engineers},
year = {2012},
journal = {IEEE Transactions on Visualization and Computer Graphics},
pages = {2033--2040},
keywords = {topology; scalar fields; multifields},
url = {https://eprints.whiterose.ac.uk/77400/},
abstract = {In nuclear science, density functional theory (DFT) is a powerful tool to model the complex interactions within the atomic nucleus, and is the primary theoretical approach used by physicists seeking a better understanding of fission. However, DFT simulations result in complex multivariate datasets in which it is difficult to locate the crucial `scission' point at which one nucleus fragments into two, and to identify the precursors to scission. The Joint Contour Net (JCN) has recently been proposed as a new data structure for the topological analysis of multivariate scalar fields, analogous to the contour tree for univariate fields. This paper reports the analysis of DFT simulations using the JCN, the first application of the JCN technique to real data. It makes three contributions to visualization: (i) a set of practical methods for visualizing the JCN, (ii) new insight into the detection of nuclear scission, and (iii) an analysis of aesthetic criteria to drive further work on representing the JCN.}
}