# Authors: Berk Geveci, Axel Huebl, Utkarsh Ayachit
#
from vtkmodules.util.vtkAlgorithm import VTKPythonAlgorithmBase

from .. import print_error

try:
    import numpy as np
    import openpmd_api as io
    _has_openpmd = True
except ImportError as ie:
    print_error(
        "Missing required Python modules/packages. Algorithms in this module may "
        "not work as expected! \n {0}".format(ie))
    _has_openpmd = False
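# Helper: returns a callback that forwards "modified" notifications to the
# given object without keeping a strong reference to it. The weakref avoids a
# reference cycle between the reader and its vtkDataArraySelection observers.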
def createModifiedCallback(anobject):
    import weakref
    weakref_obj = weakref.ref(anobject)
    anobject = None

    def _markmodified(*args, **kwargs):
        o = weakref_obj()
        if o is not None:
            o.Modified()
    return _markmodified
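# openPMD reader exposed as a VTK Python algorithm with two output ports:
# port 0 carries the field/mesh data (vtkPartitionedDataSet) and port 1 the
# particle data, grouped per species (vtkPartitionedDataSetCollection).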
class openPMDReader(VTKPythonAlgorithmBase):
    """A reader that reads openPMD format."""
    def __init__(self):
        VTKPythonAlgorithmBase.__init__(self, nInputPorts=0, nOutputPorts=2, outputType='vtkPartitionedDataSet')
        self._filename = None
        self._timevalues = None
        self._series = None
        self._timemap = {}

        from vtkmodules.vtkCommonCore import vtkDataArraySelection
        self._arrayselection = vtkDataArraySelection()
        self._arrayselection.AddObserver("ModifiedEvent", createModifiedCallback(self))

        self._speciesselection = vtkDataArraySelection()
        self._speciesselection.AddObserver("ModifiedEvent", createModifiedCallback(self))

        self._particlearrayselection = vtkDataArraySelection()
        self._particlearrayselection.AddObserver("ModifiedEvent", createModifiedCallback(self))
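    # Map the pipeline's requested UPDATE_TIME_STEP to the closest available
    # time value that does not exceed it (falling back to the first step).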
    def _get_update_time(self, outInfo):
        from vtkmodules.vtkCommonExecutionModel import vtkStreamingDemandDrivenPipeline
        executive = vtkStreamingDemandDrivenPipeline
        timevalues = self._timevalues
        if timevalues is None or len(timevalues) == 0:
            return None
        elif outInfo.Has(executive.UPDATE_TIME_STEP()) and len(timevalues) > 0:
            utime = outInfo.Get(executive.UPDATE_TIME_STEP())
            dtime = timevalues[0]
            for atime in timevalues:
                if atime > utime:
                    return dtime
                else:
                    dtime = atime
            return dtime
        else:
            assert(len(timevalues) > 0)
            return timevalues[0]
    def _get_array_selection(self):
        return self._arrayselection

    def _get_particle_array_selection(self):
        return self._particlearrayselection

    def _get_species_selection(self):
        return self._speciesselection
    def SetFileName(self, name):
        """Specify filename for the file to read."""
        if self._filename != name:
            self._filename = name
            self._timevalues = None
            if self._series:
                self._series = None
            self.Modified()
    def GetTimestepValues(self):
        return self._timevalues
    def GetDataArraySelection(self):
        return self._get_array_selection()

    def GetSpeciesSelection(self):
        return self._get_species_selection()

    def GetParticleArraySelection(self):
        return self._get_particle_array_selection()
    def FillOutputPortInformation(self, port, info):
        from vtkmodules.vtkCommonDataModel import vtkDataObject
        if port == 0:
            info.Set(vtkDataObject.DATA_TYPE_NAME(), "vtkPartitionedDataSet")
        else:
            info.Set(vtkDataObject.DATA_TYPE_NAME(), "vtkPartitionedDataSetCollection")
        return 1
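    # RequestInformation opens the series and publishes meta data: available
    # time steps plus the names of meshes, particle records and species. The
    # file passed to SetFileName is expected to contain the openPMD series
    # file pattern on its first line (typically something like "data_%T.h5"),
    # interpreted relative to that file's directory.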
    def RequestInformation(self, request, inInfoVec, outInfoVec):
        global _has_openpmd
        if not _has_openpmd:
            print_error("Required Python module 'openpmd_api' missing!")
            return 0

        from vtkmodules.vtkCommonExecutionModel import vtkStreamingDemandDrivenPipeline, vtkAlgorithm
        executive = vtkStreamingDemandDrivenPipeline
        for i in (0, 1):
            outInfo = outInfoVec.GetInformationObject(i)
            outInfo.Remove(executive.TIME_STEPS())
            outInfo.Remove(executive.TIME_RANGE())
            outInfo.Set(vtkAlgorithm.CAN_HANDLE_PIECE_REQUEST(), 1)

        # Why is this a string when it is None?
        if self._filename == 'None':
            return 1

        mfile = open(self._filename, "r")
        pattern = mfile.readlines()[0][0:-1]
        del mfile

        import os
        if not self._series:
            self._series = io.Series(os.path.dirname(self._filename)+'/'+pattern, io.Access_Type.read_only)

        # This is how we get time values and arrays
        self._timemap = {}
        timevalues = []
        arrays = set()
        particles = set()
        species = set()
        for idx, iteration in self._series.iterations.items():
            # extract the time
            if callable(iteration.time):  # prior to openPMD-api 0.13.0
                time = iteration.time() * iteration.time_unit_SI()
            else:
                time = iteration.time * iteration.time_unit_SI
            timevalues.append(time)
            self._timemap[time] = idx

            arrays.update([
                mesh_name
                for mesh_name, mesh in iteration.meshes.items()])
            particles.update([
                species_name + "_" + record_name
                for species_name, species in iteration.particles.items()
                for record_name, record in species.items()
            ])
            species.update([
                species_name
                for species_name, _ in iteration.particles.items()
            ])

        for array in arrays:
            self._arrayselection.AddArray(array)
        for particle_array in particles:
            self._particlearrayselection.AddArray(particle_array)
        for species_name in species:
            self._speciesselection.AddArray(species_name)

        timesteps = list(self._series.iterations)
        self._timevalues = timevalues
        if len(timevalues) > 0:
            for i in (0, 1):
                outInfo = outInfoVec.GetInformationObject(i)
                for t in timevalues:
                    outInfo.Append(executive.TIME_STEPS(), t)
                outInfo.Append(executive.TIME_RANGE(), timevalues[0])
                outInfo.Append(executive.TIME_RANGE(), timevalues[-1])
        return 1
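    # Translate a selection array name back into the (mesh, component) or
    # (species, record) pair it was built from in RequestInformation.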
    def _get_array_and_component(self, itr, name):
        for mesh_name, mesh in itr.meshes.items():
            if mesh_name == name:
                return (mesh_name, None)
            for comp_name, _ in mesh.items():
                if name == mesh_name + "_" + comp_name:
                    return (mesh_name, comp_name)
        return (None, None)

    def _get_particle_array_and_component(self, itr, name):
        for species_name, species in itr.particles.items():
            for record_name, record in species.items():
                if name == species_name + "_" + record_name:
                    return (species_name, record_name)
        return (None, None)
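    # Load one chunk of a mesh record: read every scalar component, scale it
    # to SI units, and interleave the components into an (N, ncomp) array
    # (scalar records come back as a flat array).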
    def _load_array(self, var, chunk_offset, chunk_extent):
        arrays = []
        for name, scalar in var.items():
            comp = scalar.load_chunk(chunk_offset, chunk_extent)
            self._series.flush()
            comp = comp * scalar.unit_SI
            arrays.append(comp)

        ncomp = len(var)
        if ncomp > 1:
            flt = np.ravel(arrays, order='F')
            return flt.reshape((flt.shape[0]//ncomp, ncomp))
        else:
            return arrays[0].flatten(order='F')
    def _find_array(self, itr, name):
        var = itr.meshes[name]
        theta_modes = None
        if var.geometry == io.Geometry.thetaMode:
            theta_modes = 3  # hardcoded, parse from geometry_parameters
        return (var,
                np.array(var.grid_spacing) * var.grid_unit_SI,
                np.array(var.grid_global_offset) * var.grid_unit_SI,
                theta_modes)
    def _get_num_particles(self, itr, species):
        sp = itr.particles[species]
        var = sp["position"]
        return var['x'].shape[0]
    def _load_particle_array(self, itr, species, name, start, end):
        sp = itr.particles[species]
        var = sp[name]
        arrays = []
        for name, scalar in var.items():
            comp = scalar.load_chunk([start], [end-start+1])
            self._series.flush()
            comp = comp * scalar.unit_SI
            arrays.append(comp)

        ncomp = len(var)
        if ncomp > 1:
            flt = np.ravel(arrays, order='F')
            return flt.reshape((flt.shape[0]//ncomp, ncomp))
        else:
            return arrays[0]
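    # Particle positions in openPMD are stored as an in-cell "position" plus a
    # "positionOffset"; both are read, scaled to SI units and summed per
    # component. 1D/2D species are padded with zero columns so that VTK always
    # receives 3D points.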
    def _load_particles(self, itr, species, start, end):
        sp = itr.particles[species]
        var = sp["position"]
        ovar = sp["positionOffset"]
        position_arrays = []
        for name, scalar in var.items():
            pos = scalar.load_chunk([start], [end-start+1])
            self._series.flush()
            pos = pos * scalar.unit_SI
            off = ovar[name].load_chunk([start], [end-start+1])
            self._series.flush()
            off = off * ovar[name].unit_SI
            position_arrays.append(pos + off)

        flt = np.ravel(position_arrays, order='F')
        num_components = len(var)  # 1D, 2D and 3D positions
        flt = flt.reshape((flt.shape[0]//num_components, num_components))
        # 1D and 2D particles: pad additional components with zero
        while flt.shape[1] < 3:
            flt = np.column_stack([flt, np.zeros_like(flt[:, 0])])
        return flt
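    # Distribute one species across the requested pieces: each piece loads a
    # contiguous slice of the particles, attaches the selected record arrays
    # as point data, and builds one VTK_VERTEX cell per particle.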
    def _load_species(self, itr, species, arrays, piece, npieces, ugrid):
        nparticles = self._get_num_particles(itr, species)
        nlocalparticles = nparticles // npieces
        start = nlocalparticles * piece
        end = start + nlocalparticles - 1
        if piece == npieces - 1:
            end = nparticles - 1

        pts = self._load_particles(itr, species, start, end)
        npts = pts.shape[0]
        ugrid.Points = pts
        for array in arrays:
            if array[1] == 'position' or array[1] == 'positionOffset':
                continue
            ugrid.PointData.append(
                self._load_particle_array(itr, array[0], array[1], start, end),
                array[1])

        from vtkmodules.vtkCommonDataModel import vtkCellArray
        ca = vtkCellArray()
        if npts < np.iinfo(np.int32).max:
            dtype = np.int32
        else:
            dtype = np.int64
        offsets = np.linspace(0, npts, npts+1, dtype=dtype)
        cells = np.linspace(0, npts-1, npts, dtype=dtype)
        from vtkmodules.numpy_interface import dataset_adapter
        offsets = dataset_adapter.numpyTovtkDataArray(offsets)
        offsets2 = offsets.NewInstance()
        offsets2.DeepCopy(offsets)
        cells = dataset_adapter.numpyTovtkDataArray(cells)
        cells2 = cells.NewInstance()
        cells2.DeepCopy(cells)
        ca.SetData(offsets2, cells2)
        from vtkmodules.util import vtkConstants
        ugrid.VTKObject.SetCells(vtkConstants.VTK_VERTEX, ca)
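    # Produce the field output: determine the per-piece chunk with a
    # vtkExtentTranslator, load the selected meshes for the requested time
    # step, and fill a vtkImageData partition with the resulting point data.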
    def _RequestFieldData(self, executive, output, outInfo):
        from vtkmodules.numpy_interface import dataset_adapter as dsa
        from vtkmodules.vtkCommonDataModel import vtkImageData
        from vtkmodules.vtkCommonExecutionModel import vtkExtentTranslator

        piece = outInfo.Get(executive.UPDATE_PIECE_NUMBER())
        npieces = outInfo.Get(executive.UPDATE_NUMBER_OF_PIECES())
        nghosts = outInfo.Get(executive.UPDATE_NUMBER_OF_GHOST_LEVELS())
        et = vtkExtentTranslator()

        data_time = self._get_update_time(outInfo)
        idx = self._timemap[data_time]
        itr = self._series.iterations[idx]

        arrays = []
        narrays = self._arrayselection.GetNumberOfArrays()
        for i in range(narrays):
            if self._arrayselection.GetArraySetting(i):
                name = self._arrayselection.GetArrayName(i)
                arrays.append((name, self._find_array(itr, name)))
        shp = None
        spacing = None
        theta_modes = None
        grid_offset = None
        for _, ary in arrays:
            var = ary[0]
            for name, scalar in var.items():
                shape = scalar.shape
                break
            spc = list(ary[1])
            if not spacing:
                spacing = spc
            elif spacing != spc:  # all meshes need to have the same spacing
                return 0
            offset = list(ary[2])
            if not grid_offset:
                grid_offset = offset
            elif grid_offset != offset:  # all meshes need to have the same grid offset
                return 0
            if not shp:
                shp = shape
            elif shape != shp:  # all arrays need to have the same shape
                return 0
            if not theta_modes:
                theta_modes = ary[3]
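        # Note: the thetaMode (RZ) branch below replicates only mode 0 of each
        # mesh over `nthetas` sample angles and writes the result into a
        # vtkImageData whose third axis is theta; higher modes are not yet
        # reconstructed.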
        # fields/meshes: RZ
        if theta_modes and shp is not None:
            et.SetWholeExtent(0, shp[0]-1, 0, shp[1]-1, 0, shp[2]-1)
            et.SetSplitModeToZSlab()  # note: Y and Z are both fine
            et.SetPiece(piece)
            et.SetNumberOfPieces(npieces)
            # et.SetGhostLevel(nghosts)
            et.PieceToExtentByPoints()
            ext = et.GetExtent()

            chunk_offset = [ext[0], ext[2], ext[4]]
            chunk_extent = [ext[1] - ext[0] + 1, ext[3] - ext[2] + 1, ext[5] - ext[4] + 1]

            data = []
            nthetas = 100  # user parameter
            thetas = np.linspace(0., 2.*np.pi, nthetas)
            chunk_cyl_shape = (chunk_extent[1], chunk_extent[2], nthetas)  # z, r, theta
            for name, var in arrays:
                cyl_values = np.zeros(chunk_cyl_shape)
                values = self._load_array(var[0], chunk_offset, chunk_extent)
                self._series.flush()

                print(chunk_cyl_shape)
                print(values.shape)
                print("+++++++++++")
                for ntheta in range(nthetas):
                    cyl_values[:, :, ntheta] += values[0, :, :]
                data.append((name, cyl_values))
                # add all other modes via loop
                # for m in range(theta_modes):

            cyl_spacing = [spacing[0], spacing[1], thetas[1]-thetas[0]]

            z_coord = np.linspace(0., cyl_spacing[0] * chunk_cyl_shape[0], chunk_cyl_shape[0])
            r_coord = np.linspace(0., cyl_spacing[1] * chunk_cyl_shape[1], chunk_cyl_shape[1])
            t_coord = thetas

            # to cartesian
            print(z_coord.shape, r_coord.shape, t_coord.shape)
            cyl_coords = np.meshgrid(r_coord, z_coord, t_coord)
            rs = cyl_coords[1]
            zs = cyl_coords[0]
            thetas = cyl_coords[2]

            y_coord = rs * np.sin(thetas)
            x_coord = rs * np.cos(thetas)
            z_coord = zs
            # mesh_pts = np.zeros((chunk_cyl_shape[0], chunk_cyl_shape[1], chunk_cyl_shape[2], 3))
            # mesh_pts[:, :, :, 0] = z_coord

            img = vtkImageData()
            img.SetExtent(
                chunk_offset[1], chunk_offset[1] + chunk_cyl_shape[0] - 1,
                chunk_offset[2], chunk_offset[2] + chunk_cyl_shape[1] - 1,
                0, nthetas-1)
            img.SetSpacing(cyl_spacing)

            imgw = dsa.WrapDataObject(img)
            output.SetPartition(0, img)
            for name, array in data:
                # print(array.shape)
                # print(array.transpose(2,1,0).flatten(order='C').shape)
                imgw.PointData.append(array.transpose(2, 1, 0).flatten(order='C'), name)

            # data = []
            # for name, var in arrays:
            #     unit_SI = var[0].unit_SI
            #     data.append((name, unit_SI * var[0].load_chunk(chunk_offset, chunk_extent)))
            # self._series.flush()
        # fields/meshes: 1D-3D
        elif shp is not None:
            whole_extent = []
            # interleave shape with zeros
            for s in shp:
                whole_extent.append(0)
                whole_extent.append(s-1)
            # 1D and 2D data: pad with 0, 0 for extra dimensions
            while len(whole_extent) < 6:
                whole_extent.append(0)
                whole_extent.append(0)
            et.SetWholeExtent(*whole_extent)
            et.SetPiece(piece)
            et.SetNumberOfPieces(npieces)
            et.SetGhostLevel(nghosts)
            et.PieceToExtent()
            ext = et.GetExtent()

            chunk_offset = [ext[0], ext[2], ext[4]]
            chunk_extent = [ext[1] - ext[0] + 1, ext[3] - ext[2] + 1, ext[5] - ext[4] + 1]
            # 1D and 2D data: remove extra dimensions for load
            del chunk_offset[len(shp):]
            del chunk_extent[len(shp):]

            data = []
            for name, var in arrays:
                values = self._load_array(var[0], chunk_offset, chunk_extent)
                self._series.flush()
                data.append((name, values))
            # 1D and 2D data: pad spacing with extra 1s and grid_offset with
            # extra 0s until 3D
            i = iter(spacing)
            spacing = [next(i, 1) for _ in range(3)]
            i = iter(grid_offset)
            grid_offset = [next(i, 0) for _ in range(3)]

            img = vtkImageData()
            img.SetExtent(ext[0], ext[1], ext[2], ext[3], ext[4], ext[5])
            img.SetSpacing(spacing)
            img.SetOrigin(grid_offset)

            et.SetGhostLevel(0)
            et.PieceToExtent()
            ext = et.GetExtent()
            ext = [ext[0], ext[1], ext[2], ext[3], ext[4], ext[5]]
            img.GenerateGhostArray(ext)
            imgw = dsa.WrapDataObject(img)
            output.SetPartition(0, img)
            for name, array in data:
                imgw.PointData.append(array, name)
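    # Produce the particle output: group the enabled particle arrays by
    # species and emit one vtkPartitionedDataSet (holding a vtkUnstructuredGrid
    # of vertices) per enabled species.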
    def _RequestParticleData(self, executive, poutput, outInfo):
        from vtkmodules.numpy_interface import dataset_adapter as dsa
        from vtkmodules.vtkCommonDataModel import vtkUnstructuredGrid, vtkPartitionedDataSet

        piece = outInfo.Get(executive.UPDATE_PIECE_NUMBER())
        npieces = outInfo.Get(executive.UPDATE_NUMBER_OF_PIECES())

        data_time = self._get_update_time(outInfo)
        idx = self._timemap[data_time]
        itr = self._series.iterations[idx]

        array_by_species = {}
        narrays = self._particlearrayselection.GetNumberOfArrays()
        for i in range(narrays):
            if self._particlearrayselection.GetArraySetting(i):
                name = self._particlearrayselection.GetArrayName(i)
                names = self._get_particle_array_and_component(
                    itr, name)
                if names[0] and self._speciesselection.ArrayIsEnabled(names[0]):
                    if not names[0] in array_by_species:
                        array_by_species[names[0]] = []
                    array_by_species[names[0]].append(names)
        ids = 0
        for species, arrays in array_by_species.items():
            pds = vtkPartitionedDataSet()
            ugrid = vtkUnstructuredGrid()
            pds.SetPartition(0, ugrid)
            poutput.SetPartitionedDataSet(ids, pds)
            ids += 1
            self._load_species(
                itr, species, arrays, piece, npieces, dsa.WrapDataObject(ugrid))
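    # RequestData fills both outputs for the requested time step: information
    # object 0 receives the field data, information object 1 the particles.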
    def RequestData(self, request, inInfoVec, outInfoVec):
        global _has_openpmd
        if not _has_openpmd:
            print_error("Required Python module 'openpmd_api' missing!")
            return 0

        from vtkmodules.vtkCommonDataModel import vtkPartitionedDataSet, vtkPartitionedDataSetCollection
        from vtkmodules.vtkCommonExecutionModel import vtkStreamingDemandDrivenPipeline

        executive = vtkStreamingDemandDrivenPipeline
        numInfo = outInfoVec.GetNumberOfInformationObjects()

        for i in range(numInfo):
            outInfo = outInfoVec.GetInformationObject(i)
            if i == 0:
                output = vtkPartitionedDataSet.GetData(outInfoVec, 0)
                self._RequestFieldData(executive, output, outInfo)
            elif i == 1:
                poutput = vtkPartitionedDataSetCollection.GetData(outInfoVec, 1)
                self._RequestParticleData(executive, poutput, outInfo)
            else:
                print_error("Unexpected number of output information objects: "
                            "expected exactly 2, got {0}".format(numInfo))
                return 0

        return 1
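# Example usage (a minimal sketch outside of ParaView; "sample/series.pmd" is
# a hypothetical file whose first line names the openPMD file pattern, and
# "E" stands in for whatever mesh the series actually contains):
#
#   reader = openPMDReader()
#   reader.SetFileName("sample/series.pmd")
#   reader.UpdateInformation()                        # scans the series, fills selections
#   reader.GetDataArraySelection().EnableArray("E")   # enable a mesh by name
#   reader.Update()
#   fields = reader.GetOutputDataObject(0)            # vtkPartitionedDataSet
#   particles = reader.GetOutputDataObject(1)         # vtkPartitionedDataSetCollection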