Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 2 additions & 3 deletions openmc/filter.py
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,6 @@
)



class FilterMeta(ABCMeta):
"""Metaclass for filters that ensures class names are appropriate."""

Expand Down Expand Up @@ -1861,7 +1860,7 @@ def __init__(self, particles, energies=None, filter_id=None):
def __repr__(self):
string = type(self).__name__ + '\n'
string += '{: <16}=\t{}\n'.format('\tParticles',
[str(p) for p in self.particles])
[str(p) for p in self.particles])
if self.energies is not None:
string += '{: <16}=\t{}\n'.format('\tEnergies', self.energies)
string += '{: <16}=\t{}\n'.format('\tID', self.id)
Expand Down Expand Up @@ -2171,7 +2170,7 @@ def paths(self):
if self._paths is None:
if not hasattr(self, '_geometry'):
raise ValueError(
"Model must be exported before the 'paths' attribute is" \
"Model must be exported before the 'paths' attribute is"
"available for a DistribcellFilter.")

# Determine paths for cell instances
Expand Down
260 changes: 247 additions & 13 deletions openmc/mesh.py
Original file line number Diff line number Diff line change
Expand Up @@ -662,7 +662,7 @@ def num_mesh_cells(self):
def write_data_to_vtk(self,
filename: PathLike,
datasets: dict | None = None,
volume_normalization: bool = True,
volume_normalization: bool | None = None,
curvilinear: bool = False):
"""Creates a VTK object of the mesh

Expand All @@ -678,9 +678,10 @@ def write_data_to_vtk(self,
with structured indexing in "C" ordering. See the "expand_dims" flag
of :meth:`~openmc.Tally.get_reshaped_data` on reshaping tally data when using
:class:`~openmc.MeshFilter`'s.
volume_normalization : bool, optional
volume_normalization : bool or None, optional
Whether or not to normalize the data by the volume of the mesh
elements.
elements. When None (default), the format-appropriate default is
used: True for legacy ASCII .vtk files, False for .vtkhdf files.
curvilinear : bool
Whether or not to write curvilinear elements. Only applies to
``SphericalMesh`` and ``CylindricalMesh``.
Expand Down Expand Up @@ -712,6 +713,18 @@ def write_data_to_vtk(self,
>>> heating = tally.get_reshaped_data(expand_dims=True)
>>> mesh.write_data_to_vtk({'heating': heating})
"""
if Path(filename).suffix == ".vtkhdf":
write_impl = getattr(self, '_write_vtk_hdf5', None)
if write_impl is None:
raise NotImplementedError(f"VTKHDF output not implemented for {type(self).__name__}")
# write_impl is a bound method – do NOT pass self again
write_impl(filename, datasets, volume_normalization)
return None

# legacy ASCII .vtk default is True
norm = volume_normalization if volume_normalization is not None else True

# vtk is an optional dependency only needed for the legacy ASCII path
import vtk
from vtk.util import numpy_support as nps

Expand All @@ -728,20 +741,26 @@ def write_data_to_vtk(self,
# maintain a list of the datasets as added to the VTK arrays to
# ensure they persist in memory until the file is written
datasets_out = []
# Regular/Rectilinear meshes store data in C (ijk) order which
# matches VTK's expected ordering directly — no transpose needed.
# Curvilinear meshes (Cylindrical, Spherical) require a transpose
# to convert from C ordering to the Fortran (kji) ordering that VTK
# expects.
# TODO: update to "C" ordering throughout
needs_transpose = not isinstance(
self, (RegularMesh, RectilinearMesh))
for label, dataset in datasets.items():
dataset = self._reshape_vtk_dataset(dataset)
self._check_vtk_dataset(label, dataset)
# If the array data is 3D, assume is in C ordering and transpose
# before flattening to match the ordering expected by the VTK
# array based on the way mesh indices are ordered in the Python
# API
# TODO: update to "C" ordering throughout
if dataset.ndim == 3:
dataset = dataset.T.ravel()
dataset = dataset.T.ravel() if needs_transpose else dataset.ravel()
datasets_out.append(dataset)

if volume_normalization:
dataset /= self.volumes.T.ravel()
if norm:
if needs_transpose:
dataset /= self.volumes.T.ravel()
else:
dataset /= self.volumes.ravel()

dataset_array = vtk.vtkDoubleArray()
dataset_array.SetName(label)
Expand Down Expand Up @@ -1477,6 +1496,72 @@ def get_indices_at_coords(self, coords: Sequence[float]) -> tuple:
indices = np.floor((coords_array - lower_left) / spacing).astype(int)
return tuple(int(i) for i in indices[:ndim])

def _write_vtk_hdf5(self, filename, datasets, volume_normalization):
    """Write RegularMesh as VTKHDF StructuredGrid format.

    Parameters
    ----------
    filename : path-like
        Path of the .vtkhdf file to create (overwritten if it exists).
    datasets : dict or None
        Mapping of dataset name to an array of per-element values.
    volume_normalization : bool
        Whether to divide each dataset by the mesh element volumes.
    """
    dims = self.dimension
    ndim = len(dims)

    # Vertex dimensions (cells + 1) – store only ndim entries so that
    # 1-D and 2-D meshes carry the right number of dimensions.
    vertex_dims = [d + 1 for d in dims]

    # Build explicit point coordinates. Pad coordinate arrays to 3-D so
    # that every point has an (x, y, z) triple; extra coordinates are 0.
    coords_1d = [
        np.linspace(self.lower_left[i], self.upper_right[i], dims[i] + 1)
        for i in range(ndim)
    ]
    coords_1d += [np.array([0.0])] * (3 - len(coords_1d))

    # np.meshgrid with indexing='ij' → axis 0 = x, axis 1 = y, axis 2 = z
    xx, yy, zz = np.meshgrid(*coords_1d, indexing='ij')
    # Flatten in Fortran (x-fastest) order for VTK point ordering
    points = np.column_stack([
        xx.ravel(order='F'),
        yy.ravel(order='F'),
        zz.ravel(order='F'),
    ]).astype(np.float64)  # shape (n_points, 3)

    with h5py.File(filename, "w") as f:
        root = f.create_group("VTKHDF")
        root.attrs["Version"] = (2, 1)
        _type = "StructuredGrid".encode("ascii")
        root.attrs.create(
            "Type",
            _type,
            dtype=h5py.string_dtype("ascii", len(_type)),
        )
        root.create_dataset("Dimensions", data=vertex_dims, dtype="i8")
        root.create_dataset("Points", data=points, dtype="f8")

        cell_data_group = root.create_group("CellData")

        if not datasets:
            return

        for name, data in datasets.items():
            data = self._reshape_vtk_dataset(data)
            if data.ndim > 1 and data.shape != self.dimension:
                raise ValueError(
                    f'Cannot apply multidimensional dataset "{name}" with '
                    f"shape {data.shape} to mesh {self.id} "
                    f"with dimensions {self.dimension}"
                )
            if data.size != self.n_elements:
                raise ValueError(
                    f"The size of the dataset '{name}' ({data.size}) should be"
                    f" equal to the number of mesh cells ({self.n_elements})"
                )

            # Multidimensional data arrives in C (ijk) order and must be
            # transposed to VTK's Fortran (x-fastest) cell ordering; 1-D
            # data is assumed to already be in VTK ordering.
            flat_data = data.T.ravel() if data.ndim > 1 else data.ravel()

            if volume_normalization:
                # Normalize the flattened data by the identically
                # flattened volumes so that 1-D input divides
                # element-wise rather than broadcasting against the
                # multidimensional volume array (which would fail or
                # silently produce wrong values).
                flat_data = flat_data / self.volumes.T.ravel()

            cell_data_group.create_dataset(
                name, data=flat_data, dtype="float64", chunks=True
            )


def Mesh(*args, **kwargs):
warnings.warn("Mesh has been renamed RegularMesh. Future versions of "
Expand Down Expand Up @@ -1730,6 +1815,55 @@ def get_indices_at_coords(self, coords: Sequence[float]) -> tuple[int, int, int]

return tuple(indices)

def _write_vtk_hdf5(self, filename, datasets, volume_normalization):
    """Write RectilinearMesh as VTK-HDF5 StructuredGrid format.

    Note: vtkRectilinearGrid is not part of the VTKHDF spec yet, so
    StructuredGrid with explicit point coordinates is used instead.

    Parameters
    ----------
    filename : path-like
        Path of the .vtkhdf file to create (overwritten if it exists).
    datasets : dict or None
        Mapping of dataset name to an array of per-element values.
    volume_normalization : bool
        Whether to divide each dataset by the mesh element volumes.
    """
    nx, ny, nz = self.dimension
    vertex_dims = [nx + 1, ny + 1, nz + 1]

    # indexing='ij' → axis 0 = x, axis 1 = y, axis 2 = z
    X, Y, Z = np.meshgrid(self.x_grid, self.y_grid, self.z_grid, indexing='ij')

    # Flatten in Fortran (x-fastest) order to match VTK's structured
    # point ordering. A plain C-order reshape would emit points with z
    # varying fastest, mismatching both the Dimensions dataset and the
    # x-fastest cell-data ordering written below.
    points = np.column_stack([
        X.ravel(order='F'),
        Y.ravel(order='F'),
        Z.ravel(order='F'),
    ]).astype(np.float64)  # shape (n_points, 3)

    with h5py.File(filename, "w") as f:
        root = f.create_group("VTKHDF")
        root.attrs["Version"] = (2, 1)
        root.attrs["Type"] = b"StructuredGrid"
        root.create_dataset("Dimensions", data=vertex_dims, dtype="i8")
        root.create_dataset("Points", data=points, dtype="f8")

        cell_data_group = root.create_group("CellData")

        if not datasets:
            return

        for name, data in datasets.items():
            data = self._reshape_vtk_dataset(data)
            if data.ndim > 1 and data.shape != self.dimension:
                raise ValueError(
                    f'Cannot apply dataset "{name}" with '
                    f"shape {data.shape} to mesh {self.id} "
                    f"with dimensions {self.dimension}"
                )
            if data.size != self.n_elements:
                raise ValueError(
                    f"The size of the dataset '{name}' ({data.size}) should be"
                    f" equal to the number of mesh cells ({self.n_elements})"
                )

            # C-ordered (ijk) data is transposed to VTK's x-fastest cell
            # ordering; 1-D data is assumed to already be in VTK order.
            flat_data = data.T.ravel() if data.ndim > 1 else data.ravel()

            if volume_normalization:
                # Divide flattened data by the identically flattened
                # volumes so 1-D input cannot mis-broadcast against the
                # 3-D volume array.
                flat_data = flat_data / self.volumes.T.ravel()

            cell_data_group.create_dataset(
                name, data=flat_data, dtype="float64", chunks=True
            )


class CylindricalMesh(StructuredMesh):
"""A 3D cylindrical mesh
Expand Down Expand Up @@ -2183,6 +2317,53 @@ def _convert_to_cartesian(arr, origin: Sequence[float]):
arr[..., 2] += origin[2]
return arr

def _write_vtk_hdf5(self, filename, datasets, volume_normalization):
    """Write CylindricalMesh as VTK-HDF5 StructuredGrid format.

    Parameters
    ----------
    filename : path-like
        Path of the .vtkhdf file to create (overwritten if it exists).
    datasets : dict or None
        Mapping of dataset name to an array of per-element values.
    volume_normalization : bool
        Whether to divide each dataset by the mesh element volumes.
    """
    nr, nphi, nz = self.dimension
    vertex_dims = [nr + 1, nphi + 1, nz + 1]

    # Grid of (r, phi, z) vertices; axis 0 = r, axis 1 = phi, axis 2 = z
    R, Phi, Z = np.meshgrid(self.r_grid, self.phi_grid, self.z_grid, indexing='ij')
    # Convert to Cartesian coordinates, shifted by the mesh origin
    X = R * np.cos(Phi) + self.origin[0]
    Y = R * np.sin(Phi) + self.origin[1]
    Z = Z + self.origin[2]

    # Flatten in Fortran (first-axis-fastest) order to match VTK's
    # structured point ordering. A plain C-order reshape would emit
    # points with the last axis varying fastest, mismatching both the
    # Dimensions dataset and the cell-data ordering written below.
    points = np.column_stack([
        X.ravel(order='F'),
        Y.ravel(order='F'),
        Z.ravel(order='F'),
    ]).astype(np.float64)  # shape (n_points, 3)

    with h5py.File(filename, "w") as f:
        root = f.create_group("VTKHDF")
        root.attrs["Version"] = (2, 1)
        root.attrs["Type"] = b"StructuredGrid"
        root.create_dataset("Dimensions", data=vertex_dims, dtype="i8")
        root.create_dataset("Points", data=points, dtype="f8")

        cell_data_group = root.create_group("CellData")

        if not datasets:
            return

        for name, data in datasets.items():
            data = self._reshape_vtk_dataset(data)
            if data.ndim > 1 and data.shape != self.dimension:
                raise ValueError(
                    f'Cannot apply dataset "{name}" with '
                    f"shape {data.shape} to mesh {self.id} "
                    f"with dimensions {self.dimension}"
                )
            if data.size != self.n_elements:
                raise ValueError(
                    f"The size of the dataset '{name}' ({data.size}) should be"
                    f" equal to the number of mesh cells ({self.n_elements})"
                )

            # C-ordered (ijk) data is transposed to VTK's
            # first-axis-fastest cell ordering; 1-D data is assumed to
            # already be in VTK order.
            flat_data = data.T.ravel() if data.ndim > 1 else data.ravel()

            if volume_normalization:
                # Divide flattened data by the identically flattened
                # volumes so 1-D input cannot mis-broadcast against the
                # 3-D volume array.
                flat_data = flat_data / self.volumes.T.ravel()

            cell_data_group.create_dataset(
                name, data=flat_data, dtype="float64", chunks=True
            )


class SphericalMesh(StructuredMesh):
"""A 3D spherical mesh
Expand Down Expand Up @@ -2570,6 +2751,55 @@ def get_indices_at_coords(self, coords: Sequence[float]) -> tuple:
"get_indices_at_coords is not yet implemented for SphericalMesh"
)

def _write_vtk_hdf5(self, filename, datasets, volume_normalization):
    """Write SphericalMesh as VTK-HDF5 StructuredGrid format.

    Parameters
    ----------
    filename : path-like
        Path of the .vtkhdf file to create (overwritten if it exists).
    datasets : dict or None
        Mapping of dataset name to an array of per-element values.
    volume_normalization : bool
        Whether to divide each dataset by the mesh element volumes.
    """
    nr, ntheta, nphi = self.dimension
    vertex_dims = [nr + 1, ntheta + 1, nphi + 1]

    # Grid of (r, theta, phi) vertices; axis 0 = r, axis 1 = theta,
    # axis 2 = phi
    R, Theta, Phi = np.meshgrid(
        self.r_grid, self.theta_grid, self.phi_grid, indexing='ij'
    )
    # Convert to Cartesian coordinates, shifted by the mesh origin
    X = R * np.sin(Theta) * np.cos(Phi) + self.origin[0]
    Y = R * np.sin(Theta) * np.sin(Phi) + self.origin[1]
    Z = R * np.cos(Theta) + self.origin[2]

    # Flatten in Fortran (first-axis-fastest) order to match VTK's
    # structured point ordering. A plain C-order reshape would emit
    # points with the last axis varying fastest, mismatching both the
    # Dimensions dataset and the cell-data ordering written below.
    points = np.column_stack([
        X.ravel(order='F'),
        Y.ravel(order='F'),
        Z.ravel(order='F'),
    ]).astype(np.float64)  # shape (n_points, 3)

    with h5py.File(filename, "w") as f:
        root = f.create_group("VTKHDF")
        root.attrs["Version"] = (2, 1)
        root.attrs["Type"] = b"StructuredGrid"
        root.create_dataset("Dimensions", data=vertex_dims, dtype="i8")
        root.create_dataset("Points", data=points, dtype="f8")

        cell_data_group = root.create_group("CellData")

        if not datasets:
            return

        for name, data in datasets.items():
            data = self._reshape_vtk_dataset(data)
            if data.ndim > 1 and data.shape != self.dimension:
                raise ValueError(
                    f'Cannot apply dataset "{name}" with '
                    f"shape {data.shape} to mesh {self.id} "
                    f"with dimensions {self.dimension}"
                )
            if data.size != self.n_elements:
                raise ValueError(
                    f"The size of the dataset '{name}' ({data.size}) should be"
                    f" equal to the number of mesh cells ({self.n_elements})"
                )

            # C-ordered (ijk) data is transposed to VTK's
            # first-axis-fastest cell ordering; 1-D data is assumed to
            # already be in VTK order.
            flat_data = data.T.ravel() if data.ndim > 1 else data.ravel()

            if volume_normalization:
                # Divide flattened data by the identically flattened
                # volumes so 1-D input cannot mis-broadcast against the
                # 3-D volume array.
                flat_data = flat_data / self.volumes.T.ravel()

            cell_data_group.create_dataset(
                name, data=flat_data, dtype="float64", chunks=True
            )


def require_statepoint_data(func):
@wraps(func)
Expand Down Expand Up @@ -3026,6 +3256,10 @@ def _write_data_to_vtk_hdf5_format(
datasets: dict | None = None,
volume_normalization: bool = True,
):
"""Write UnstructuredMesh as VTK-HDF5 UnstructuredGrid format.

Supports linear tetrahedra and linear hexahedra elements.
"""
def append_dataset(dset, array):
"""Convenience function to append data to an HDF5 dataset"""
origLen = dset.shape[0]
Expand Down Expand Up @@ -3104,7 +3338,7 @@ def append_dataset(dset, array):
cell_data_group = root.create_group("CellData")

for name, data in datasets.items():

cell_data_group.create_dataset(
name, (0,), maxshape=(None,), dtype="float64", chunks=True
)
Expand Down Expand Up @@ -3242,4 +3476,4 @@ def _read_meshes(elem):
_HEX_MIDPOINT_CONN += ((2, (0, 0, 0)),
(2, (1, 0, 0)),
(2, (1, 1, 0)),
(2, (0, 1, 0)))
(2, (0, 1, 0)))
Loading
Loading