Changing to a property
matthewturk committed Jul 19, 2020
1 parent 499ce82 commit 2519f0f
Showing 4 changed files with 23 additions and 18 deletions.
9 changes: 6 additions & 3 deletions yt/frontends/flash/io.py
@@ -164,7 +164,10 @@ def __init__(self, ds):
         self._position_fields = [
             self._particle_fields["particle_pos%s" % ax] for ax in "xyz"
         ]
-        self._chunksize = 32 ** 3
+
+    @property
+    def chunksize(self):
+        return 32 ** 3
 
     def _read_fluid_selection(self, chunks, selector, fields, size):
         raise NotImplementedError
@@ -224,7 +227,7 @@ def _initialize_index(self, data_file, regions):
         morton = np.empty(pcount, dtype="uint64")
         ind = 0
         while ind < pcount:
-            npart = min(self._chunksize, pcount - ind)
+            npart = min(self.chunksize, pcount - ind)
             pos = np.empty((npart, 3), dtype="=f8")
             pos[:, 0] = p_fields[ind : ind + npart, px]
             pos[:, 1] = p_fields[ind : ind + npart, py]
@@ -237,7 +240,7 @@ def _initialize_index(self, data_file, regions):
                 data_file.ds.domain_left_edge,
                 data_file.ds.domain_right_edge,
             )
-            ind += self._chunksize
+            ind += self.chunksize
         return morton
 
     _pcount = None
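Both _initialize_index hunks above leave the chunked read pattern intact and only rename the attribute: positions are consumed in blocks of at most chunksize entries, so the Morton keys can be built without holding every particle position in memory at once. A minimal standalone sketch of that loop shape (the iter_chunks helper and the 70,000-particle figure are illustrative, not yt API):

    def iter_chunks(n_total, chunksize=32 ** 3):
        # Yield (start, count) pairs covering n_total items in blocks of
        # at most chunksize, mirroring the while-loop in _initialize_index.
        ind = 0
        while ind < n_total:
            yield ind, min(chunksize, n_total - ind)
            ind += chunksize

    # For 70,000 particles and chunksize 32 ** 3 == 32768 this yields
    # (0, 32768), (32768, 32768), (65536, 4464).
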
4 changes: 2 additions & 2 deletions yt/frontends/sdf/io.py
@@ -65,7 +65,7 @@ def _initialize_index(self, data_file, regions):
         morton = np.empty(pcount, dtype="uint64")
         ind = 0
         while ind < pcount:
-            npart = min(self.ds.index._chunksize, pcount - ind)
+            npart = min(self.ds.index.chunksize, pcount - ind)
             pos = np.empty((npart, 3), dtype=x.dtype)
             pos[:, 0] = x[ind : ind + npart]
             pos[:, 1] = y[ind : ind + npart]
@@ -78,7 +78,7 @@ def _initialize_index(self, data_file, regions):
                 data_file.ds.domain_left_edge,
                 data_file.ds.domain_right_edge,
             )
-            ind += self.ds.index._chunksize
+            ind += self.ds.index.chunksize
         return morton
 
     def _identify_fields(self, data_file):
10 changes: 5 additions & 5 deletions yt/frontends/tipsy/io.py
@@ -90,7 +90,7 @@ def _read_particle_coords(self, chunks, ptf):
         for chunk in chunks:
             for obj in chunk.objs:
                 data_files.update(obj.data_files)
-        chunksize = self.ds.index._chunksize
+        chunksize = self.ds.index.chunksize
         for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)):
             poff = data_file.field_offsets
             tp = data_file.total_particles
@@ -176,7 +176,7 @@ def _read_particle_fields(self, chunks, ptf, selector):
                         continue
                     f.seek(poff[ptype])
                     afields = list(set(field_list).intersection(self._aux_fields))
-                    count = min(self.ds.index._chunksize, tp[ptype])
+                    count = min(self.ds.index.chunksize, tp[ptype])
                     p = np.fromfile(f, self._pdtypes[ptype], count=count)
                     auxdata = []
                     for afield in afields:
@@ -252,7 +252,7 @@ def _update_domain(self, data_file):
                     continue
                 stop = ind + count
                 while ind < stop:
-                    c = min(self.ds.index._chunksize, stop - ind)
+                    c = min(self.ds.index.chunksize, stop - ind)
                     pp = np.fromfile(f, dtype=self._pdtypes[ptype], count=c)
                     np.minimum(
                         mi,
@@ -467,10 +467,10 @@ def _calculate_particle_offsets_aux(self, data_file):
         for i, ptype in enumerate(self._ptypes):
             if data_file.total_particles[ptype] == 0:
                 continue
-            elif params[npart_mapping[ptype]] > self.ds.index._chunksize:
+            elif params[npart_mapping[ptype]] > self.ds.index.chunksize:
                 for j in range(i):
                     npart = params[npart_mapping[self._ptypes[j]]]
-                    if npart > self.ds.index._chunksize:
+                    if npart > self.ds.index.chunksize:
                         pos += npart * size
             pos += data_file.start * size
             aux_fields_offsets[afield][ptype] = pos
18 changes: 10 additions & 8 deletions yt/geometry/particle_geometry_handler.py
@@ -13,12 +13,9 @@
 from yt.utilities.lib.fnv_hash import fnv_hash
 from yt.utilities.logger import ytLogger as mylog
 
-CHUNKSIZE = 64 ** 3
-
 
 class ParticleIndex(Index):
     """The Index subclass for particle datasets"""
-    _chunksize = 64**3
 
     def __init__(self, ds, dataset_type):
         self.dataset_type = dataset_type
@@ -46,6 +43,11 @@ def _get_particle_type_counts(self):
     def convert(self, unit):
         return self.dataset.conversion_factors[unit]
 
+    @property
+    def chunksize(self):
+        # This can be overridden in subclasses
+        return 64 ** 3
+
     def _setup_filenames(self):
         template = self.dataset.filename_template
         ndoms = self.dataset.file_count
@@ -54,20 +56,20 @@ def _setup_filenames(self):
         fi = 0
         for i in range(int(ndoms)):
             start = 0
-            if self._chunksize > 0:
-                end = start + self._chunksize
+            if self.chunksize > 0:
+                end = start + self.chunksize
             else:
                 end = None
             while True:
-                df = cls(self.dataset, self.io, template % {'num':i}, fi, (start, end))
+                df = cls(self.dataset, self.io, template % {"num": i}, fi, (start, end))
                 if max(df.total_particles.values()) == 0:
                     break
                 fi += 1
                 self.data_files.append(df)
-                if self._chunksize <= 0:
+                if self.chunksize <= 0:
                     break
                 start = end
-                end += self._chunksize
+                end += self.chunksize
         self.total_particles = sum(
             sum(d.total_particles.values()) for d in self.data_files
         )
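With chunksize now a read-only property on ParticleIndex (and, for FLASH particles, on the IO handler), a frontend tunes it by overriding the property rather than assigning an attribute, as the in-code comment suggests. A minimal sketch of that override pattern, using a hypothetical frontend index class:

    from yt.geometry.particle_geometry_handler import ParticleIndex

    class MyFrontendParticleIndex(ParticleIndex):
        # Hypothetical subclass: use a smaller chunk to bound peak memory
        # while the particle index is built.
        @property
        def chunksize(self):
            return 32 ** 3

Per the _setup_filenames logic above, a subclass returning a value <= 0 disables sub-file chunking entirely: end stays None and each on-disk file maps to a single data file.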
