From 9a066c32361d53e8c19a1e0c97378732703bff63 Mon Sep 17 00:00:00 2001 From: Michael Zingale Date: Fri, 17 Apr 2020 12:17:36 -0400 Subject: [PATCH 001/653] This adds a method to PWViewerMPL to automated exporting figures to a matplotlib grid recover from a deleted branch. --- yt/visualization/plot_window.py | 74 +++++++++++++++++++++++++++++++++ 1 file changed, 74 insertions(+) diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 8cb1a6203a1..3b842b6d8d7 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -1,5 +1,7 @@ import numpy as np import matplotlib +import matplotlib.pyplot as plt +from mpl_toolkits.axes_grid1 import ImageGrid import types import sys @@ -1267,6 +1269,78 @@ def show_axes(self, field=None): self.plots[f].show_axes() return self + def export_to_mpl_figure(self, nrows_ncols, axes_pad=1.0, + label_mode="L", + cbar_location="right", cbar_size="5%", + cbar_mode="each", cbar_pad="0%"): + r""" + Creates a matplotlib figure object with the specified axes arrangement, nrows_ncols, + and maps the underlying figures to the matplotlib axes. Note that all of these + parameters are fed directly to the matplotlib ImageGrid class to create the new figure + layout. + + Parameters + ---------- + + nrows_ncols : tuple + the number of rows and columns of the axis grid (e.g., nrows_ncols=(2,2,)) + axes_pad : float + padding between axes in inches + label_mode : one of "L", "1", "all" + arrangement of axes that are labeled + cbar_location : one of "left", "right", "bottom", "top" + where to place the colorbar + cbar_size : string (percentage) + scaling of the colorbar (e.g., "5%") + cbar_mode : one of "each", "single", "edge", None + how to represent the colorbar + cbar_pad : string (percentage) + padding between the axis and colorbar (e.g. "5%") + + Returns + ------- + + The return is a matplotlib figure object. 
+ + Examples + -------- + + >>> import yt + >>> ds = yt.load_sample("IsolatedGalaxy") + >>> fields = ['density', 'velocity_x', 'velocity_y', 'velocity_magnitude'] + >>> p = yt.SlicePlot(ds, 'z', fields) + >>> p.set_log('velocity_x', False) + >>> p.set_log('velocity_y', False) + >>> fig = p.export_to_mpl_figure((2,2)) + >>> fig.tight_layout() + >>> fig.savefig("test.png") + + """ + + fig = plt.figure() + grid = ImageGrid(fig, 111, + nrows_ncols=nrows_ncols, + axes_pad=axes_pad, + label_mode=label_mode, + cbar_location=cbar_location, + cbar_size=cbar_size, + cbar_mode=cbar_mode, + cbar_pad=cbar_pad) + + fields = self.fields + if len(fields) > len(grid): + raise IndexError("not enough axes for the number of fields") + + for i, f in enumerate(self.fields): + plot = self.plots[f] + plot.figure = fig + plot.axes = grid[i].axes + plot.cax = grid.cbar_axes[i] + + self._setup_plots() + + return fig + class AxisAlignedSlicePlot(PWViewerMPL): r"""Creates a slice plot from a dataset From 333436c9605d7be6615d0ea3e386e206c494c9a9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 21 Jan 2020 15:49:59 +0000 Subject: [PATCH 002/653] Add support for fast neighbour search --- setup.py | 5 + yt/frontends/ramses/data_structures.py | 2 +- yt/frontends/ramses/io_utils.pyx | 2 +- yt/geometry/fake_octree.pyx | 6 +- yt/geometry/oct_container.pxd | 2 - yt/geometry/oct_container.pyx | 5 +- yt/geometry/ramses_oct_container.pxd | 15 +++ yt/geometry/ramses_oct_container.pyx | 139 +++++++++++++++++++++++++ 8 files changed, 165 insertions(+), 11 deletions(-) create mode 100644 yt/geometry/ramses_oct_container.pxd create mode 100644 yt/geometry/ramses_oct_container.pyx diff --git a/setup.py b/setup.py index 0c3f8fea9d0..9123917a441 100644 --- a/setup.py +++ b/setup.py @@ -97,6 +97,11 @@ def _compile( "yt/utilities/lib/tsearch.c"], include_dirs=["yt/utilities/lib"], libraries=std_libs), + Extension("yt.geometry.ramses_oct_container", + ["yt/geometry/ramses_oct_container.pyx", + "yt/utilities/lib/tsearch.c"], + include_dirs=["yt/utilities/lib"], + libraries=std_libs), Extension("yt.geometry.oct_visitors", ["yt/geometry/oct_visitors.pyx"], include_dirs=["yt/utilities/lib/"], diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a65262209ea..2335987c80e 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -27,7 +27,7 @@ from .particle_handlers import get_particle_handlers from .field_handlers import get_field_handlers from yt.utilities.cython_fortran_utils import FortranFile as fpu -from yt.geometry.oct_container import \ +from yt.geometry.ramses_oct_container import \ RAMSESOctreeContainer from yt.arraytypes import blankRecordArray diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 160cdb62b39..12fc0918f33 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -2,7 +2,7 @@ cimport cython cimport numpy as np import numpy as np from yt.utilities.cython_fortran_utils cimport FortranFile -from yt.geometry.oct_container cimport RAMSESOctreeContainer +from yt.geometry.ramses_oct_container cimport RAMSESOctreeContainer from yt.utilities.exceptions import YTIllDefinedAMRData ctypedef np.int32_t INT32_t diff --git a/yt/geometry/fake_octree.pyx b/yt/geometry/fake_octree.pyx index e69dc92165f..375f6a5856f 100644 --- a/yt/geometry/fake_octree.pyx +++ b/yt/geometry/fake_octree.pyx @@ -13,11 +13,11 @@ from oct_visitors cimport cind import numpy as np cimport cython -from 
oct_container cimport Oct, RAMSESOctreeContainer +from oct_container cimport Oct, SparseOctreeContainer # Create a balanced octree by a random walk that recursively # subdivides -def create_fake_octree(RAMSESOctreeContainer oct_handler, +def create_fake_octree(SparseOctreeContainer oct_handler, long max_noct, long max_level, np.ndarray[np.int32_t, ndim=1] ndd, @@ -44,7 +44,7 @@ def create_fake_octree(RAMSESOctreeContainer oct_handler, return cur_leaf -cdef long subdivide(RAMSESOctreeContainer oct_handler, +cdef long subdivide(SparseOctreeContainer oct_handler, Oct *parent, int ind[3], int dd[3], long cur_leaf, long cur_level, diff --git a/yt/geometry/oct_container.pxd b/yt/geometry/oct_container.pxd index 4f7c482f075..3a3571464c4 100644 --- a/yt/geometry/oct_container.pxd +++ b/yt/geometry/oct_container.pxd @@ -94,8 +94,6 @@ cdef class SparseOctreeContainer(OctreeContainer): cdef void key_to_ipos(self, np.int64_t key, np.int64_t pos[3]) cdef np.int64_t ipos_to_key(self, int pos[3]) nogil -cdef class RAMSESOctreeContainer(SparseOctreeContainer): - pass cdef extern from "tsearch.h" nogil: void *tsearch(const void *key, void **rootp, diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 4f077a13712..e4695918370 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -210,7 +210,7 @@ cdef class OctreeContainer: ind32[i] = ind[i] self.get_root(ind32, &next) # We want to stop recursing when there's nowhere else to go - while next != NULL and level <= max_level: + while next != NULL and level < max_level: level += 1 for i in range(3): ipos[i] = (ipos[i] << 1) + ind[i] @@ -888,9 +888,6 @@ cdef class SparseOctreeContainer(OctreeContainer): # called. if self.root_nodes != NULL: free(self.root_nodes) -cdef class RAMSESOctreeContainer(SparseOctreeContainer): - pass - cdef class ARTOctreeContainer(OctreeContainer): def __init__(self, oct_domain_dimensions, domain_left_edge, domain_right_edge, partial_coverage = 0, diff --git a/yt/geometry/ramses_oct_container.pxd b/yt/geometry/ramses_oct_container.pxd new file mode 100644 index 00000000000..f399e87e75e --- /dev/null +++ b/yt/geometry/ramses_oct_container.pxd @@ -0,0 +1,15 @@ +""" +RAMSES Oct definitions file + + + + +""" +from oct_container cimport SparseOctreeContainer, OctInfo +from .oct_visitors cimport OctVisitor, Oct, cind +from yt.utilities.lib.fp_utils cimport * +cimport numpy as np + +cdef class RAMSESOctreeContainer(SparseOctreeContainer): + cdef Oct neighbor_in_direction(self, OctInfo *oinfo, np.int64_t *nneighbors, + Oct *o, bint periodicity[3]) \ No newline at end of file diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx new file mode 100644 index 00000000000..ac7aec06495 --- /dev/null +++ b/yt/geometry/ramses_oct_container.pyx @@ -0,0 +1,139 @@ +cimport cython +cimport oct_visitors +from selection_routines cimport SelectorObject, AlwaysSelector +cimport numpy as np +import numpy as np + +cdef class FillFileIndices(oct_visitors.OctVisitor): + cdef np.int64_t[:,:,:,:] cell_inds + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + if selected == 0: return + self.cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = self.index + self.index += 1 + +cdef class NeighborVisitor(oct_visitors.OctVisitor): + cdef np.int64_t[:,:,:,:] cell_inds + cdef np.int64_t[:,:,:,:] neigh_cell_inds + cdef int idim # 0,1,2 for x,y,z + cdef int direction # +1 for +x, -1 
for -x + cdef np.uint8_t neigh_ind[3] + cdef RAMSESOctreeContainer octree + cdef OctInfo oi + cdef Oct *neighbour + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void set_neighbour_oct(self): + cdef int i + cdef np.float64_t c, dx + cdef np.int64_t ipos + cdef np.float64_t fcoords[3] + cdef Oct *neighbour + dx = 1.0 / ((1 << self.oref) << self.level) + # Compute position of neighbouring cell + for i in range(3): + c = ((self.pos[i] << self.oref) + self.ind[i]) + if i == self.idim: + fcoords[i] = (c + 0.5 + self.direction) * dx / self.octree.nn[i] + else: + fcoords[i] = (c + 0.5) * dx / self.octree.nn[i] + + # Use octree to find neighbour + neighbour = self.octree.get(fcoords, &self.oi, max_level=self.level) + + # Extra step - compute cell position in neighbouring oct (and store in oi.ipos) + if self.oi.level == self.level - 1: + for i in range(3): + ipos = (((self.pos[i] << self.oref) + self.ind[i])) >> 1 + if i == self.idim: + ipos += self.direction + #print('oi.level=%s level=%s oi.ipos[%s]<<2=%s ipos=%s' % ( + # self.oi.level, self.level, i, self.oi.ipos[i]<<2, ipos)) + if (self.oi.ipos[i] << 1) == ipos: + self.oi.ipos[i] = 0 + else: + self.oi.ipos[i] = 1 + self.neighbour = neighbour + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + cdef int i + cdef np.int64_t cell_ind + cdef bint other_oct # True if the neighbouring cell lies in another oct + + # Note that we provide an index even if the cell is not selected. + if selected == 0: return + # Index of neighbouring cell within its oct + for i in range(3): + if i == self.idim: + self.neigh_ind[i] = (self.ind[i] + self.direction) + other_oct = self.neigh_ind[i] != 0 and self.neigh_ind[i] != 1 + if other_oct: + self.neigh_ind[i] %= 2 + else: + self.neigh_ind[i] = self.ind[i] + + if not other_oct: + # Simple case: the neighbouring cell is within the oct + cell_ind = self.cell_inds[o.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] + else: + # Complicated case: the cell is in a neighbouring oct + if self.last != o.domain_ind: + self.set_neighbour_oct() + self.last = o.domain_ind + + if self.neighbour != NULL: + if self.oi.level == self.level -1: + # Need to find cell position in neighbouring oct + for i in range(3): + self.neigh_ind[i] = self.oi.ipos[i] + elif self.oi.level != self.level: + print('FUUUUUCK %s %s' % (self.oi.level, self.level)) + cell_ind = self.cell_inds[self.neighbour.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] + else: + cell_ind = -1 + self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind + + + +cdef class RAMSESOctreeContainer(SparseOctreeContainer): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + cdef Oct neighbor_in_direction(self, OctInfo *oi, np.int64_t *nneighbors, Oct *o, + bint periodicity[3]): + pass + + def neighbors_in_direction(self, int idim, int direction, SelectorObject selector = AlwaysSelector(None)): + """Return index on file of all neighbors in a given direction""" + cdef SelectorObject always_selector = AlwaysSelector(None) + cdef FillFileIndices visitor + + cdef int num_cells = selector.count_oct_cells(self, -1) + + # Get the on-file index of each cell + cdef np.ndarray[np.int64_t, ndim=4] cell_inds = np.zeros((num_cells//8, 2, 2, 2), dtype="int64") + cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.empty_like(cell_inds) + visitor = 
FillFileIndices(self, -1) + visitor.cell_inds = cell_inds + + self.visit_all_octs(selector, visitor) + + # Revisit the tree, now querying the neighbour in a given direction + cdef NeighborVisitor n_visitor + n_visitor = NeighborVisitor(self, -1) + n_visitor.idim = idim + n_visitor.direction = -direction + n_visitor.cell_inds = cell_inds + n_visitor.neigh_cell_inds = neigh_cell_inds + n_visitor.octree = self + n_visitor.last = -1 + self.visit_all_octs(always_selector, n_visitor) + + return np.asarray(cell_inds), np.asarray(neigh_cell_inds) From 6ce041687566ea9e09df93a8f7120825052769c7 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 22 Jan 2020 09:34:57 +0000 Subject: [PATCH 003/653] More generic code --- yt/geometry/ramses_oct_container.pxd | 4 +- yt/geometry/ramses_oct_container.pyx | 75 ++++++++++++++++++++-------- 2 files changed, 56 insertions(+), 23 deletions(-) diff --git a/yt/geometry/ramses_oct_container.pxd b/yt/geometry/ramses_oct_container.pxd index f399e87e75e..6d5dddd8997 100644 --- a/yt/geometry/ramses_oct_container.pxd +++ b/yt/geometry/ramses_oct_container.pxd @@ -11,5 +11,5 @@ from yt.utilities.lib.fp_utils cimport * cimport numpy as np cdef class RAMSESOctreeContainer(SparseOctreeContainer): - cdef Oct neighbor_in_direction(self, OctInfo *oinfo, np.int64_t *nneighbors, - Oct *o, bint periodicity[3]) \ No newline at end of file + cdef Oct neighbour_in_direction(self, OctInfo *oinfo, np.int64_t *nneighbors, + Oct *o, bint periodicity[3]) diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index ac7aec06495..2cf71d7cbbc 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -14,12 +14,11 @@ cdef class FillFileIndices(oct_visitors.OctVisitor): self.cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = self.index self.index += 1 -cdef class NeighborVisitor(oct_visitors.OctVisitor): - cdef np.int64_t[:,:,:,:] cell_inds - cdef np.int64_t[:,:,:,:] neigh_cell_inds +cdef class BaseNeighbourVisitor(oct_visitors.OctVisitor): cdef int idim # 0,1,2 for x,y,z cdef int direction # +1 for +x, -1 for -x cdef np.uint8_t neigh_ind[3] + cdef np.int64_t[:,:,:,:] cell_inds cdef RAMSESOctreeContainer octree cdef OctInfo oi cdef Oct *neighbour @@ -51,8 +50,6 @@ cdef class NeighborVisitor(oct_visitors.OctVisitor): ipos = (((self.pos[i] << self.oref) + self.ind[i])) >> 1 if i == self.idim: ipos += self.direction - #print('oi.level=%s level=%s oi.ipos[%s]<<2=%s ipos=%s' % ( - # self.oi.level, self.level, i, self.oi.ipos[i]<<2, ipos)) if (self.oi.ipos[i] << 1) == ipos: self.oi.ipos[i] = 0 else: @@ -62,13 +59,13 @@ cdef class NeighborVisitor(oct_visitors.OctVisitor): @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) - cdef void visit(self, Oct* o, np.uint8_t selected): + cdef np.int64_t get_neighbour_cell_index(self, Oct* o, np.uint8_t selected): cdef int i cdef np.int64_t cell_ind cdef bint other_oct # True if the neighbouring cell lies in another oct # Note that we provide an index even if the cell is not selected. 
- if selected == 0: return + # if selected == 0: return -1 # Index of neighbouring cell within its oct for i in range(3): if i == self.idim: @@ -89,47 +86,83 @@ cdef class NeighborVisitor(oct_visitors.OctVisitor): self.last = o.domain_ind if self.neighbour != NULL: - if self.oi.level == self.level -1: - # Need to find cell position in neighbouring oct + if self.oi.level == self.level - 1: + # Position within neighbouring oct is stored in oi.ipos for i in range(3): self.neigh_ind[i] = self.oi.ipos[i] elif self.oi.level != self.level: - print('FUUUUUCK %s %s' % (self.oi.level, self.level)) + print('This should not happen! %s %s' % (self.oi.level, self.level)) + return -1 cell_ind = self.cell_inds[self.neighbour.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] else: cell_ind = -1 + return cell_ind + + cdef inline np.uint8_t neighbour_rind(self): + cdef int d = (1 << self.oref) + return (((self.neigh_ind[2]*d)+self.neigh_ind[1])*d+self.neigh_ind[0]) + +cdef class NeighbourVisitor(BaseNeighbourVisitor): + cdef np.int64_t[:,:,:,:] neigh_cell_inds + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + cdef np.int64_t cell_ind + cell_ind = self.get_neighbour_cell_index(o, selected) self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind - +cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): + cdef np.uint8_t[:] shifted_levels + cdef np.int64_t[:] shifted_file_inds + cdef np.uint8_t[:] shifted_cell_inds + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + cdef np.int64_t neigbour_cell_index + if selected == 0: return + # Note: only selected items have an index + neighbour_index = self.get_neighbour_cell_index(o, selected) + self.shifted_levels[self.index] = self.oi.level + self.shifted_file_inds[self.index] = self.neighbour.file_ind + self.shifted_cell_inds[self.index] = self.neighbour_rind() + self.index += 1 + # if neighbour_index > -1: + # self.shifted_levels[neighbour_index] = self.level + # self.shifted_file_inds[neighbour_index] = o.file_ind + # self.shifted_cell_inds[neighbour_index] = self.rind() cdef class RAMSESOctreeContainer(SparseOctreeContainer): @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - cdef Oct neighbor_in_direction(self, OctInfo *oi, np.int64_t *nneighbors, Oct *o, + cdef Oct neighbour_in_direction(self, OctInfo *oi, np.int64_t *nneighbours, Oct *o, bint periodicity[3]): pass - def neighbors_in_direction(self, int idim, int direction, SelectorObject selector = AlwaysSelector(None)): - """Return index on file of all neighbors in a given direction""" + def neighbours_in_direction(self, int idim, int direction, SelectorObject selector = AlwaysSelector(None)): + """Return index on file of all neighbours in a given direction""" cdef SelectorObject always_selector = AlwaysSelector(None) - cdef FillFileIndices visitor cdef int num_cells = selector.count_oct_cells(self, -1) # Get the on-file index of each cell - cdef np.ndarray[np.int64_t, ndim=4] cell_inds = np.zeros((num_cells//8, 2, 2, 2), dtype="int64") - cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.empty_like(cell_inds) + cdef FillFileIndices visitor + cdef np.ndarray[np.int64_t, ndim=4] cell_inds = np.zeros((self.nocts, 2, 2, 2), dtype="int64") + visitor = FillFileIndices(self, -1) visitor.cell_inds = cell_inds self.visit_all_octs(selector, 
visitor) - # Revisit the tree, now querying the neighbour in a given direction - cdef NeighborVisitor n_visitor - n_visitor = NeighborVisitor(self, -1) + # Store the index of the neighbour + cdef NeighbourVisitor n_visitor + cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.empty_like(cell_inds) + n_visitor = NeighbourVisitor(self, -1) n_visitor.idim = idim - n_visitor.direction = -direction + n_visitor.direction = direction n_visitor.cell_inds = cell_inds n_visitor.neigh_cell_inds = neigh_cell_inds n_visitor.octree = self From 2b0baa360e7c5838d5ad36c8cccc119190b5dc19 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 22 Jan 2020 14:47:50 +0000 Subject: [PATCH 004/653] Remove useless variable on oct container --- yt/geometry/oct_container.pyx | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index e4695918370..544c88af7c3 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -728,14 +728,13 @@ cdef class OctreeContainer: cdef np.ndarray[np.float64_t, ndim=2] source cdef np.ndarray[np.float64_t, ndim=1] dest cdef int i - cdef np.int64_t local_filled = 0 + cdef str key for key in dest_fields: dest = dest_fields[key] source = source_fields[key] for i in range(levels.shape[0]): if levels[i] != level: continue dest[i + offset] = source[file_inds[i], cell_inds[i]] - local_filled += 1 def finalize(self): cdef SelectorObject selector = selection_routines.AlwaysSelector(None) From 8cf8a565564a1ac31c1c12e6a82101922b25e37d Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 22 Jan 2020 14:48:59 +0000 Subject: [PATCH 005/653] First working (not crashing version) --- yt/geometry/ramses_oct_container.pyx | 85 +++++++++++++++++++++++++--- 1 file changed, 77 insertions(+), 8 deletions(-) diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index 2cf71d7cbbc..f6b6e98ef55 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -13,7 +13,7 @@ cdef class FillFileIndices(oct_visitors.OctVisitor): if selected == 0: return self.cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = self.index self.index += 1 - + cdef class BaseNeighbourVisitor(oct_visitors.OctVisitor): cdef int idim # 0,1,2 for x,y,z cdef int direction # +1 for +x, -1 for -x @@ -125,14 +125,15 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): if selected == 0: return # Note: only selected items have an index neighbour_index = self.get_neighbour_cell_index(o, selected) - self.shifted_levels[self.index] = self.oi.level - self.shifted_file_inds[self.index] = self.neighbour.file_ind - self.shifted_cell_inds[self.index] = self.neighbour_rind() + self.shifted_levels[self.index] = self.level + if self.neighbour != NULL: + # Note: we store the local level, not the remote one + self.shifted_file_inds[self.index] = self.neighbour.file_ind + self.shifted_cell_inds[self.index] = self.neighbour_rind() + else: + self.shifted_file_inds[self.index] = -1 + self.shifted_cell_inds[self.index] = 255 # -1 on uint8 self.index += 1 - # if neighbour_index > -1: - # self.shifted_levels[neighbour_index] = self.level - # self.shifted_file_inds[neighbour_index] = o.file_ind - # self.shifted_cell_inds[neighbour_index] = self.rind() cdef class RAMSESOctreeContainer(SparseOctreeContainer): @cython.boundscheck(False) @@ -170,3 +171,71 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): self.visit_all_octs(always_selector, n_visitor) return 
np.asarray(cell_inds), np.asarray(neigh_cell_inds) + + def file_index_octs_with_shift(self, SelectorObject selector, int domain_id, + int idim, int direction, int num_cells = -1): + """Return index on file of all neighbours in a given direction""" + # We create oct arrays of the correct size + cdef np.int64_t i + if num_cells < 0: + num_cells = selector.count_oct_cells(self, domain_id) + + # TODO remove this requirement + cdef np.ndarray[np.int64_t, ndim=4] cell_inds + cell_inds = np.zeros((self.nocts, 2, 2, 2), dtype="int64") - 1 + + # Fill value of each cell with its neighbouring value + cdef FillFileIndicesRNeighbour neigh_visitor + cdef np.ndarray[np.uint8_t, ndim=1] shifted_levels + cdef np.ndarray[np.uint8_t, ndim=1] shifted_cell_inds + cdef np.ndarray[np.int64_t, ndim=1] shifted_file_inds + shifted_levels = np.zeros(num_cells, dtype="uint8") + shifted_file_inds = np.zeros(num_cells, dtype="int64") + shifted_cell_inds = np.zeros(num_cells, dtype="uint8") + + if self.fill_style == "r": + neigh_visitor = FillFileIndicesRNeighbour(self, domain_id) + # input: index of neighbouring cells + neigh_visitor.cell_inds = cell_inds + # output: level, file_ind and cell_ind of the neighbouring cells + neigh_visitor.shifted_levels = shifted_levels + neigh_visitor.shifted_file_inds = shifted_file_inds + neigh_visitor.shifted_cell_inds = shifted_cell_inds + # direction to explore and extra parameters of the visitor + neigh_visitor.idim = idim + neigh_visitor.direction = direction + neigh_visitor.octree = self + neigh_visitor.last = -1 + elif self.fill_style == "o": + raise NotImplementedError('C-style filling with spatial offset has not been implemented.') + else: + raise RuntimeError + self.visit_all_octs(selector, neigh_visitor) + return shifted_levels, shifted_cell_inds, shifted_file_inds + + def file_index_octs(self, SelectorObject selector, int domain_id, + num_cells = -1, spatial_offset=(0, 0, 0)): + + + cdef int i, idim, direction + cdef bint do_spatial_offset + cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds + cdef np.ndarray source_shifted + + do_spatial_offset = False + for i in range(3): + if spatial_offset[i] == 1 or spatial_offset[i] == -1: + idim = i + direction = spatial_offset[i] + if do_spatial_offset: + raise Exception( + 'ERROR: You can only specify one spatial offset direction, got [%s, %s, %s]!' 
% + (spatial_offset[0], spatial_offset[1], spatial_offset[2])) + do_spatial_offset = True + + if not do_spatial_offset: + return super(RAMSESOctreeContainer, self).file_index_octs( + selector, domain_id, num_cells) + else: + return self.file_index_octs_with_shift( + selector, domain_id, idim, direction, num_cells) From ab4410eac4c787dfd91d92cf9a4404c334167991 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 17 Jun 2020 09:16:35 +0100 Subject: [PATCH 006/653] Does not work yet, but hey it compiles --- yt/geometry/oct_container.pxd | 8 +- yt/geometry/oct_visitors.pxd | 32 +++++ yt/geometry/oct_visitors.pyx | 119 +++++++++++++++- yt/geometry/ramses_oct_container.pyx | 195 +++++---------------------- 4 files changed, 186 insertions(+), 168 deletions(-) diff --git a/yt/geometry/oct_container.pxd b/yt/geometry/oct_container.pxd index 3a3571464c4..eb500d7fd56 100644 --- a/yt/geometry/oct_container.pxd +++ b/yt/geometry/oct_container.pxd @@ -12,7 +12,7 @@ cimport numpy as np from yt.utilities.lib.fp_utils cimport * cimport oct_visitors cimport selection_routines -from .oct_visitors cimport OctVisitor, Oct, cind +from .oct_visitors cimport OctVisitor, Oct, cind, OctInfo from libc.stdlib cimport bsearch, qsort, realloc, malloc, free from libc.math cimport floor from yt.utilities.lib.allocation_container cimport \ @@ -27,12 +27,6 @@ cdef struct OctKey: np.int64_t *indices np.int64_t pcount -cdef struct OctInfo: - np.float64_t left_edge[3] - np.float64_t dds[3] - np.int64_t ipos[3] - np.int32_t level - cdef struct OctList cdef struct OctList: diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 125c69a523e..89dec648108 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -17,6 +17,12 @@ cdef struct Oct: np.int64_t domain # (opt) addl int index Oct **children # Up to 8 long +cdef struct OctInfo: + np.float64_t left_edge[3] + np.float64_t dds[3] + np.int64_t ipos[3] + np.int32_t level + cdef struct OctPadded: np.int64_t file_ind np.int64_t domain_ind @@ -136,3 +142,29 @@ cdef inline int cind(int i, int j, int k) nogil: # THIS ONLY WORKS FOR CHILDREN. It is not general for zones. 
return (((i*2)+j)*2+k) +from oct_container cimport OctreeContainer + +# cimport oct_container +cdef class BaseNeighbourVisitor(OctVisitor): + cdef int idim # 0,1,2 for x,y,z + cdef int direction # +1 for +x, -1 for -x + cdef np.int8_t[:] neigh_ind + cdef Oct *neighbour + cdef OctreeContainer octree + cdef OctInfo oi + + cdef void set_neighbour_oct(self) + cdef void get_neighbour_cell_index(self, Oct* o, np.uint8_t selected) + + cdef inline np.uint8_t neighbour_rind(self): + cdef int d = (1 << self.oref) + return (((self.neigh_ind[2]*d)+self.neigh_ind[1])*d+self.neigh_ind[0]) + +# # # cdef class NeighbourVisitor(BaseNeighbourVisitor): +# # # cdef np.int64_t[:,:,:,:] neigh_cell_inds + +cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): + cdef np.uint8_t[:] shifted_levels + cdef np.int64_t[:] shifted_file_inds + cdef np.uint8_t[:] shifted_cell_inds + diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index f8212b6dff8..ce754f40f80 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -12,7 +12,7 @@ cimport numpy import numpy from yt.utilities.lib.fp_utils cimport * from libc.stdlib cimport malloc, free -from yt.geometry.oct_container cimport OctreeContainer +from yt.geometry.oct_container cimport OctreeContainer, OctInfo from yt.utilities.lib.geometry_utils cimport encode_morton_64bit # Now some visitor functions @@ -336,3 +336,120 @@ cdef class MortonIndexOcts(OctVisitor): np.uint64(coord[1]), np.uint64(coord[2])) self.index += 1 + + +cdef class BaseNeighbourVisitor(OctVisitor): + # cdef OctInfo oi + # cdef OctreeContainer octree + + def __init__(self, OctreeContainer octree, int domain_id = -1): + self.octree = octree + super(BaseNeighbourVisitor, self).__init__(octree, domain_id) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void set_neighbour_oct(self): + cdef int i + cdef np.float64_t c, dx + cdef np.int64_t ipos + cdef np.float64_t fcoords[3] + cdef Oct *neighbour + dx = 1.0 / ((1 << self.oref) << self.level) + # Compute position of neighbouring cell + for i in range(3): + c = ((self.pos[i] << self.oref) + self.ind[i]) + if i == self.idim: + fcoords[i] = (c + 0.5 + self.direction) * dx / self.octree.nn[i] + else: + fcoords[i] = (c + 0.5) * dx / self.octree.nn[i] + + # Use octree to find neighbour + neighbour = self.octree.get(fcoords, &self.oi, max_level=self.level) + + # Extra step - compute cell position in neighbouring oct (and store in oi.ipos) + if self.oi.level == self.level - 1: + for i in range(3): + ipos = (((self.pos[i] << self.oref) + self.ind[i])) >> 1 + if i == self.idim: + ipos += self.direction + if (self.oi.ipos[i] << 1) == ipos: + self.oi.ipos[i] = 0 + else: + self.oi.ipos[i] = 1 + self.neighbour = neighbour + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + @cython.cdivision(True) + cdef void get_neighbour_cell_index(self, Oct* o, np.uint8_t selected): + cdef int i + # cdef np.int64_t cell_ind + cdef bint other_oct # True if the neighbouring cell lies in another oct + + # Note that we provide an index even if the cell is not selected. 
+ # if selected == 0: return -1 + # Index of neighbouring cell within its oct + for i in range(3): + if i == self.idim: + self.neigh_ind[i] = (self.ind[i] + self.direction) + other_oct = self.neigh_ind[i] < 0 or self.neigh_ind[i] > 1 + if other_oct: + self.neigh_ind[i] %= 2 + else: + self.neigh_ind[i] = self.ind[i] + + if not other_oct: + # Simple case: the neighbouring cell is within the oct + # cell_ind = self.cell_inds[o.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] + self.neighbour = o + else: + # Complicated case: the cell is in a neighbouring oct + if self.last != o.domain_ind: + self.set_neighbour_oct() + self.last = o.domain_ind + + if self.neighbour != NULL: + if self.oi.level == self.level - 1: + # Position within neighbouring oct is stored in oi.ipos + for i in range(3): + self.neigh_ind[i] = self.oi.ipos[i] + if not ( 0<= self.oi.ipos[i] <= 1): + print('WHAAAAT?!', self.oi.ipos[i]) + elif self.oi.level != self.level: + print('This should not happen! %s %s' % (self.oi.level, self.level)) + return # -1 + # cell_ind = self.cell_inds[self.neighbour.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] + # else: + # cell_ind = -1 + # return cell_ind + +# cdef class NeighbourVisitor(BaseNeighbourVisitor): +# @cython.boundscheck(False) +# @cython.wraparound(False) +# @cython.initializedcheck(False) +# cdef void visit(self, Oct* o, np.uint8_t selected): +# cdef np.int64_t cell_ind +# # cell_ind = self.get_neighbour_cell_index(o, selected) +# cell_ind = 0 +# self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind + +cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + cdef np.int64_t neigbour_cell_index + if selected == 0: return + # Note: only selected items have an index + neighbour_index = self.get_neighbour_cell_index(o, selected) + self.shifted_levels[self.index] = self.level + if self.neighbour != NULL: + # Note: we store the local level, not the remote one + self.shifted_file_inds[self.index] = self.neighbour.file_ind + self.shifted_cell_inds[self.index] = self.neighbour_rind() + else: + self.shifted_file_inds[self.index] = -1 + self.shifted_cell_inds[self.index] = 255 # -1 on uint8 + self.index += 1 \ No newline at end of file diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index f6b6e98ef55..edf6e60b8d0 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -1,139 +1,18 @@ cimport cython -cimport oct_visitors +from oct_visitors cimport FillFileIndicesRNeighbour from selection_routines cimport SelectorObject, AlwaysSelector cimport numpy as np import numpy as np -cdef class FillFileIndices(oct_visitors.OctVisitor): - cdef np.int64_t[:,:,:,:] cell_inds - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - cdef void visit(self, Oct* o, np.uint8_t selected): - if selected == 0: return - self.cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = self.index - self.index += 1 - -cdef class BaseNeighbourVisitor(oct_visitors.OctVisitor): - cdef int idim # 0,1,2 for x,y,z - cdef int direction # +1 for +x, -1 for -x - cdef np.uint8_t neigh_ind[3] - cdef np.int64_t[:,:,:,:] cell_inds - cdef RAMSESOctreeContainer octree - cdef OctInfo oi - cdef Oct *neighbour - - @cython.boundscheck(False) - @cython.wraparound(False) - 
@cython.initializedcheck(False) - cdef void set_neighbour_oct(self): - cdef int i - cdef np.float64_t c, dx - cdef np.int64_t ipos - cdef np.float64_t fcoords[3] - cdef Oct *neighbour - dx = 1.0 / ((1 << self.oref) << self.level) - # Compute position of neighbouring cell - for i in range(3): - c = ((self.pos[i] << self.oref) + self.ind[i]) - if i == self.idim: - fcoords[i] = (c + 0.5 + self.direction) * dx / self.octree.nn[i] - else: - fcoords[i] = (c + 0.5) * dx / self.octree.nn[i] - - # Use octree to find neighbour - neighbour = self.octree.get(fcoords, &self.oi, max_level=self.level) - - # Extra step - compute cell position in neighbouring oct (and store in oi.ipos) - if self.oi.level == self.level - 1: - for i in range(3): - ipos = (((self.pos[i] << self.oref) + self.ind[i])) >> 1 - if i == self.idim: - ipos += self.direction - if (self.oi.ipos[i] << 1) == ipos: - self.oi.ipos[i] = 0 - else: - self.oi.ipos[i] = 1 - self.neighbour = neighbour - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - cdef np.int64_t get_neighbour_cell_index(self, Oct* o, np.uint8_t selected): - cdef int i - cdef np.int64_t cell_ind - cdef bint other_oct # True if the neighbouring cell lies in another oct - - # Note that we provide an index even if the cell is not selected. - # if selected == 0: return -1 - # Index of neighbouring cell within its oct - for i in range(3): - if i == self.idim: - self.neigh_ind[i] = (self.ind[i] + self.direction) - other_oct = self.neigh_ind[i] != 0 and self.neigh_ind[i] != 1 - if other_oct: - self.neigh_ind[i] %= 2 - else: - self.neigh_ind[i] = self.ind[i] - - if not other_oct: - # Simple case: the neighbouring cell is within the oct - cell_ind = self.cell_inds[o.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] - else: - # Complicated case: the cell is in a neighbouring oct - if self.last != o.domain_ind: - self.set_neighbour_oct() - self.last = o.domain_ind - - if self.neighbour != NULL: - if self.oi.level == self.level - 1: - # Position within neighbouring oct is stored in oi.ipos - for i in range(3): - self.neigh_ind[i] = self.oi.ipos[i] - elif self.oi.level != self.level: - print('This should not happen! 
%s %s' % (self.oi.level, self.level)) - return -1 - cell_ind = self.cell_inds[self.neighbour.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] - else: - cell_ind = -1 - return cell_ind - - cdef inline np.uint8_t neighbour_rind(self): - cdef int d = (1 << self.oref) - return (((self.neigh_ind[2]*d)+self.neigh_ind[1])*d+self.neigh_ind[0]) - -cdef class NeighbourVisitor(BaseNeighbourVisitor): - cdef np.int64_t[:,:,:,:] neigh_cell_inds - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - cdef void visit(self, Oct* o, np.uint8_t selected): - cdef np.int64_t cell_ind - cell_ind = self.get_neighbour_cell_index(o, selected) - self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind - -cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): - cdef np.uint8_t[:] shifted_levels - cdef np.int64_t[:] shifted_file_inds - cdef np.uint8_t[:] shifted_cell_inds - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - cdef void visit(self, Oct* o, np.uint8_t selected): - cdef np.int64_t neigbour_cell_index - if selected == 0: return - # Note: only selected items have an index - neighbour_index = self.get_neighbour_cell_index(o, selected) - self.shifted_levels[self.index] = self.level - if self.neighbour != NULL: - # Note: we store the local level, not the remote one - self.shifted_file_inds[self.index] = self.neighbour.file_ind - self.shifted_cell_inds[self.index] = self.neighbour_rind() - else: - self.shifted_file_inds[self.index] = -1 - self.shifted_cell_inds[self.index] = 255 # -1 on uint8 - self.index += 1 +# cdef class FillFileIndices(oct_visitors.OctVisitor): +# cdef np.int64_t[:,:,:,:] cell_inds +# @cython.boundscheck(False) +# @cython.wraparound(False) +# @cython.initializedcheck(False) +# cdef void visit(self, Oct* o, np.uint8_t selected): +# if selected == 0: return +# self.cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = self.index +# self.index += 1 cdef class RAMSESOctreeContainer(SparseOctreeContainer): @cython.boundscheck(False) @@ -143,34 +22,34 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): bint periodicity[3]): pass - def neighbours_in_direction(self, int idim, int direction, SelectorObject selector = AlwaysSelector(None)): - """Return index on file of all neighbours in a given direction""" - cdef SelectorObject always_selector = AlwaysSelector(None) + # def neighbours_in_direction(self, int idim, int direction, SelectorObject selector = AlwaysSelector(None)): + # """Return index on file of all neighbours in a given direction""" + # cdef SelectorObject always_selector = AlwaysSelector(None) - cdef int num_cells = selector.count_oct_cells(self, -1) + # cdef int num_cells = selector.count_oct_cells(self, -1) - # Get the on-file index of each cell - cdef FillFileIndices visitor - cdef np.ndarray[np.int64_t, ndim=4] cell_inds = np.zeros((self.nocts, 2, 2, 2), dtype="int64") + # # Get the on-file index of each cell + # cdef FillFileIndices visitor + # cdef np.ndarray[np.int64_t, ndim=4] cell_inds = np.zeros((self.nocts, 2, 2, 2), dtype="int64") - visitor = FillFileIndices(self, -1) - visitor.cell_inds = cell_inds + # visitor = FillFileIndices(self, -1) + # visitor.cell_inds = cell_inds - self.visit_all_octs(selector, visitor) + # self.visit_all_octs(selector, visitor) + + # # Store the index of the neighbour + # cdef NeighbourVisitor n_visitor + # cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.empty_like(cell_inds) + # n_visitor = 
NeighbourVisitor(self, -1) + # n_visitor.idim = idim + # n_visitor.direction = direction + # n_visitor.cell_inds = cell_inds + # n_visitor.neigh_cell_inds = neigh_cell_inds + # n_visitor.octree = self + # n_visitor.last = -1 + # self.visit_all_octs(always_selector, n_visitor) - # Store the index of the neighbour - cdef NeighbourVisitor n_visitor - cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.empty_like(cell_inds) - n_visitor = NeighbourVisitor(self, -1) - n_visitor.idim = idim - n_visitor.direction = direction - n_visitor.cell_inds = cell_inds - n_visitor.neigh_cell_inds = neigh_cell_inds - n_visitor.octree = self - n_visitor.last = -1 - self.visit_all_octs(always_selector, n_visitor) - - return np.asarray(cell_inds), np.asarray(neigh_cell_inds) + # return np.asarray(cell_inds), np.asarray(neigh_cell_inds) def file_index_octs_with_shift(self, SelectorObject selector, int domain_id, int idim, int direction, int num_cells = -1): @@ -180,10 +59,6 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): if num_cells < 0: num_cells = selector.count_oct_cells(self, domain_id) - # TODO remove this requirement - cdef np.ndarray[np.int64_t, ndim=4] cell_inds - cell_inds = np.zeros((self.nocts, 2, 2, 2), dtype="int64") - 1 - # Fill value of each cell with its neighbouring value cdef FillFileIndicesRNeighbour neigh_visitor cdef np.ndarray[np.uint8_t, ndim=1] shifted_levels @@ -195,8 +70,6 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): if self.fill_style == "r": neigh_visitor = FillFileIndicesRNeighbour(self, domain_id) - # input: index of neighbouring cells - neigh_visitor.cell_inds = cell_inds # output: level, file_ind and cell_ind of the neighbouring cells neigh_visitor.shifted_levels = shifted_levels neigh_visitor.shifted_file_inds = shifted_file_inds @@ -210,7 +83,9 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): raise NotImplementedError('C-style filling with spatial offset has not been implemented.') else: raise RuntimeError + print('visiting all octs') self.visit_all_octs(selector, neigh_visitor) + print('visited') return shifted_levels, shifted_cell_inds, shifted_file_inds def file_index_octs(self, SelectorObject selector, int domain_id, From b7c0c2f362c58c1342be1cca22f04d33cf34acde Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 22 Jan 2020 16:38:10 +0000 Subject: [PATCH 007/653] Wiring cython code with ghost zones --- yt/data_objects/data_containers.py | 12 ++--- yt/data_objects/octree_subset.py | 3 +- yt/frontends/ramses/data_structures.py | 69 ++++++++++++++++++++++++-- 3 files changed, 73 insertions(+), 11 deletions(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 8ebfe3988fd..4b9270c60ac 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -352,13 +352,11 @@ def _generate_spatial_fluid(self, field, ngz): rv = self.ds.arr(np.empty(wogz.ires.size, dtype="float64"), units) outputs.append(rv) - if gz._type_name == 'octree_subset': - raise NotImplementedError - else: - ind += wogz.select( - self.selector, - gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz], - rv, ind) + data = gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz] + ind += wogz.select( + self.selector, + data, + rv, ind) if accumulate: rv = uconcatenate(outputs) return rv diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index dbe297e5d0c..f98876c5e34 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -37,9 +37,10 @@ class 
OctreeSubset(YTSelectionContainer): _cell_count = -1 _block_reorder = None - def __init__(self, base_region, domain, ds, over_refine_factor = 1): + def __init__(self, base_region, domain, ds, over_refine_factor = 1, num_ghost_zones = 0): super(OctreeSubset, self).__init__(ds, None) self._num_zones = 1 << (over_refine_factor) + self._num_ghost_zones = num_ghost_zones self._oref = over_refine_factor self.domain = domain self.domain_id = domain.domain_id diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 2335987c80e..c3209b42948 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -182,7 +182,9 @@ class RAMSESDomainSubset(OctreeSubset): _domain_offset = 1 _block_reorder = "F" - def fill(self, fd, fields, selector, file_handler): + _base_grid = None + + def _fill_no_ghostzones(self, fd, fields, selector, file_handler): ndim = self.ds.dimensionality # Here we get a copy of the file, which we skip through and read the # bits we want. @@ -198,13 +200,73 @@ def fill(self, fd, fields, selector, file_handler): # Initializing data container for field in fields: tr[field] = np.zeros(cell_count, 'float64') - fill_hydro(fd, file_handler.offset, file_handler.level_count, levels, cell_inds, file_inds, ndim, all_fields, fields, tr, oct_handler) return tr + def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zones): + ndim = self.ds.dimensionality + # Here we get a copy of the file, which we skip through and read the + # bits we want. + oct_handler = self.oct_handler + all_fields = [f for ft, f in file_handler.field_list] + fields = [f for ft, f in fields] + + oct_count = selector.count_octs(self.oct_handler, self.domain_id) + cell_count = oct_count * self._num_zones**ndim + iwidth = self._num_zones + num_ghost_zones * 2 + + tr = {} + tr_all = {} + for field in fields: + tr_all[field] = np.zeros((oct_count, iwidth, iwidth, iwidth), 'float64') + tr[field] = np.zeros(cell_count, 'float64') + + # Compute the index to read with a positive and negative shift in all dimensions + for idim in range(ndim): + ishift_all = [num_ghost_zones]*ndim + for shift in range(-num_ghost_zones, num_ghost_zones+1, 2): + ishift_all[idim] = num_ghost_zones + shift + import yt + new_selector = yt.geometry.selection_routines.OctreeSubsetSelector(self) + if shift == 0: + continue + levels, cell_inds, file_inds = self.oct_handler.file_index_octs_with_shift( + new_selector, self.domain_id, cell_count, idim, shift) + + # Initializing data container + for field in fields: + tr[field][:] = 0 + + fill_hydro(fd, file_handler.offset, + file_handler.level_count, levels, cell_inds, + file_inds, ndim, all_fields, fields, tr, + oct_handler) + _slice = tuple( + [slice(None)] + + [slice(i, i+self._num_zones) for i in ishift_all]) + for field in fields: + tr_all[field][_slice] = \ + tr[field].reshape(oct_count, 2, 2, 2) + for field in fields: + tr_all[field] = tr_all[field].reshape(-1) + return tr_all + + def fill(self, fd, fields, selector, file_handler): + if self._num_ghost_zones == 0: + return self._fill_no_ghostzones(fd, fields, selector, file_handler) + else: + return self._fill_with_ghostzones(fd, fields, selector, file_handler, self._num_ghost_zones) + + def retrieve_ghost_zones(self, ngz, fields, smoothed=False): + new_subset = RAMSESDomainSubset(self.base_region, self.domain, self.ds, num_ghost_zones=ngz) + new_subset._base_grid = self + + return new_subset + + class RAMSESIndex(OctreeIndex): def __init__(self, ds, 
dataset_type='ramses'): @@ -255,13 +317,14 @@ def _detect_output_fields(self): self.field_list = self.particle_field_list + self.fluid_field_list def _identify_base_chunk(self, dobj): + ngz = dobj._num_ghost_zones if getattr(dobj, "_chunk_info", None) is None: domains = [dom for dom in self.domains if dom.included(dobj.selector)] base_region = getattr(dobj, "base_region", dobj) if len(domains) > 1: mylog.debug("Identified %s intersecting domains", len(domains)) - subsets = [RAMSESDomainSubset(base_region, domain, self.dataset) + subsets = [RAMSESDomainSubset(base_region, domain, self.dataset, num_ghost_zones=ngz) for domain in domains] dobj._chunk_info = subsets dobj._current_chunk = list(self._chunk_all(dobj))[0] From 8da0a17318b713548f8ecfb3f79bbbb772bdf22a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 22 Jan 2020 20:43:13 +0000 Subject: [PATCH 008/653] Cleanup --- yt/geometry/oct_visitors.pxd | 3 ++- yt/geometry/oct_visitors.pyx | 30 ++++++------------------------ 2 files changed, 8 insertions(+), 25 deletions(-) diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 89dec648108..18493856878 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -16,6 +16,7 @@ cdef struct Oct: np.int64_t domain_ind # index within the global set of domains np.int64_t domain # (opt) addl int index Oct **children # Up to 8 long + Oct *parent cdef struct OctInfo: np.float64_t left_edge[3] @@ -153,7 +154,7 @@ cdef class BaseNeighbourVisitor(OctVisitor): cdef OctreeContainer octree cdef OctInfo oi - cdef void set_neighbour_oct(self) + cdef void set_neighbour_oct(self, Oct* o) cdef void get_neighbour_cell_index(self, Oct* o, np.uint8_t selected) cdef inline np.uint8_t neighbour_rind(self): diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index ce754f40f80..3d4d639a977 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -8,8 +8,8 @@ Oct visitor functions cimport cython -cimport numpy -import numpy +cimport numpy as np +import numpy as np from yt.utilities.lib.fp_utils cimport * from libc.stdlib cimport malloc, free from yt.geometry.oct_container cimport OctreeContainer, OctInfo @@ -339,17 +339,15 @@ cdef class MortonIndexOcts(OctVisitor): cdef class BaseNeighbourVisitor(OctVisitor): - # cdef OctInfo oi - # cdef OctreeContainer octree - def __init__(self, OctreeContainer octree, int domain_id = -1): self.octree = octree + self.neigh_ind = np.zeros(3, np.int8) super(BaseNeighbourVisitor, self).__init__(octree, domain_id) @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) - cdef void set_neighbour_oct(self): + cdef void set_neighbour_oct(self, Oct *o): cdef int i cdef np.float64_t c, dx cdef np.int64_t ipos @@ -385,7 +383,6 @@ cdef class BaseNeighbourVisitor(OctVisitor): @cython.cdivision(True) cdef void get_neighbour_cell_index(self, Oct* o, np.uint8_t selected): cdef int i - # cdef np.int64_t cell_ind cdef bint other_oct # True if the neighbouring cell lies in another oct # Note that we provide an index even if the cell is not selected. 
@@ -402,12 +399,11 @@ cdef class BaseNeighbourVisitor(OctVisitor): if not other_oct: # Simple case: the neighbouring cell is within the oct - # cell_ind = self.cell_inds[o.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] self.neighbour = o else: # Complicated case: the cell is in a neighbouring oct if self.last != o.domain_ind: - self.set_neighbour_oct() + self.set_neighbour_oct(o) self.last = o.domain_ind if self.neighbour != NULL: @@ -419,21 +415,7 @@ cdef class BaseNeighbourVisitor(OctVisitor): print('WHAAAAT?!', self.oi.ipos[i]) elif self.oi.level != self.level: print('This should not happen! %s %s' % (self.oi.level, self.level)) - return # -1 - # cell_ind = self.cell_inds[self.neighbour.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] - # else: - # cell_ind = -1 - # return cell_ind - -# cdef class NeighbourVisitor(BaseNeighbourVisitor): -# @cython.boundscheck(False) -# @cython.wraparound(False) -# @cython.initializedcheck(False) -# cdef void visit(self, Oct* o, np.uint8_t selected): -# cdef np.int64_t cell_ind -# # cell_ind = self.get_neighbour_cell_index(o, selected) -# cell_ind = 0 -# self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind + return cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): @cython.boundscheck(False) From adb21bab0433481ca9a28c76a29a7cff5047890e Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 23 Jan 2020 14:48:14 +0000 Subject: [PATCH 009/653] First working (not crashing) version --- yt/frontends/ramses/data_structures.py | 73 +++++++++++++++++++++++--- yt/geometry/oct_container.pyx | 9 +++- yt/geometry/oct_visitors.pxd | 9 +++- yt/geometry/oct_visitors.pyx | 66 +++++++++++++++++------ yt/geometry/ramses_oct_container.pyx | 67 ++++++++++++++--------- 5 files changed, 172 insertions(+), 52 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index c3209b42948..b50a3d76f28 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -10,6 +10,8 @@ setdefaultattr from yt.geometry.oct_geometry_handler import \ OctreeIndex +from yt.geometry.selection_routines import OctreeSubsetSelector + from yt.geometry.geometry_handler import \ YTDataChunk from yt.data_objects.static_output import \ @@ -182,7 +184,18 @@ class RAMSESDomainSubset(OctreeSubset): _domain_offset = 1 _block_reorder = "F" - _base_grid = None + _base_domain = None + + def __init__(self, base_region, domain, ds, over_refine_factor=1, num_ghost_zones=0, + base_grid=None): + super(RAMSESDomainSubset, self).__init__(base_region, domain, ds, over_refine_factor, num_ghost_zones) + + self._base_grid = base_grid + + if num_ghost_zones > 0: + # Create a base domain *with no self._base_domain.fwidth + base_domain = RAMSESDomainSubset(ds.all_data(), domain, ds, over_refine_factor) + self._base_domain = base_domain def _fill_no_ghostzones(self, fd, fields, selector, file_handler): ndim = self.ds.dimensionality @@ -221,7 +234,7 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo tr = {} tr_all = {} for field in fields: - tr_all[field] = np.zeros((oct_count, iwidth, iwidth, iwidth), 'float64') + tr_all[field] = np.full((oct_count, iwidth, iwidth, iwidth), np.nan, 'float64') tr[field] = np.zeros(cell_count, 'float64') # Compute the index to read with a positive and negative shift in all dimensions @@ -229,12 +242,10 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo 
ishift_all = [num_ghost_zones]*ndim for shift in range(-num_ghost_zones, num_ghost_zones+1, 2): ishift_all[idim] = num_ghost_zones + shift - import yt - new_selector = yt.geometry.selection_routines.OctreeSubsetSelector(self) if shift == 0: continue levels, cell_inds, file_inds = self.oct_handler.file_index_octs_with_shift( - new_selector, self.domain_id, cell_count, idim, shift) + selector, self.domain_id, idim, shift, cell_count) # Initializing data container for field in fields: @@ -254,6 +265,55 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo tr_all[field] = tr_all[field].reshape(-1) return tr_all + @property + def fwidth(self): + fwidth = super(RAMSESDomainSubset, self).fwidth + if self._num_ghost_zones > 0: + fwidth = super(RAMSESDomainSubset, self).fwidth.reshape(-1, 8, 3) + n_oct = fwidth.shape[0] + new_fwidth = np.zeros((n_oct, self.nz**3, 3), dtype=fwidth.dtype) + new_fwidth[:, :, :] = fwidth[:, 0:1, :] + fwidth = new_fwidth.reshape(-1, 3) + return fwidth + + @property + def fcoords(self): + fcoords = super(RAMSESDomainSubset, self).fcoords + num_ghost_zones = self._num_ghost_zones + if num_ghost_zones == 0: + return fcoords + + fcoords_base = self._base_domain.fcoords + oct_selector = OctreeSubsetSelector(self) + oh = self.oct_handler + + n_oct = fcoords_base.size // 3 // 8 + new_fcoords = np.full((n_oct, 4, 4, 4, 3), np.nan) + + icell = oh.fill_index(oct_selector) + Ncell = icell.size + + for idim in range(3): + for idir in (-1, 1): + ishift_all = [1, 1, 1] + ishift_all[idim] += idir + + nicell = oh.neighbours_in_direction(idim, idir, icell).reshape(-1) + + tmp = np.full((n_oct * 2 * 2 * 2, 3), np.nan) + + oh.copy_neighbour_data( + icell.reshape(-1), nicell, + fcoords_base, tmp, Ncell) + + _slice = tuple([slice(None)] + + [slice(i, i+2) for i in ishift_all] + + [slice(None)]) + new_fcoords[_slice] = tmp.reshape(n_oct, 2, 2, 2, -1) + new_fcoords = self.ds.arr(new_fcoords, fcoords_base.units) + return new_fcoords + + def fill(self, fd, fields, selector, file_handler): if self._num_ghost_zones == 0: return self._fill_no_ghostzones(fd, fields, selector, file_handler) @@ -261,8 +321,7 @@ def fill(self, fd, fields, selector, file_handler): return self._fill_with_ghostzones(fd, fields, selector, file_handler, self._num_ghost_zones) def retrieve_ghost_zones(self, ngz, fields, smoothed=False): - new_subset = RAMSESDomainSubset(self.base_region, self.domain, self.ds, num_ghost_zones=ngz) - new_subset._base_grid = self + new_subset = RAMSESDomainSubset(self.base_region, self.domain, self.ds, num_ghost_zones=ngz, base_grid=self) return new_subset diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 544c88af7c3..73d349547f7 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -566,6 +566,7 @@ cdef class OctreeContainer: cdef int ind[3] cdef int nb = 0 cdef Oct *cur + cdef Oct *parent cdef np.float64_t pp[3] cdef np.float64_t cp[3] cdef np.float64_t dds[3] @@ -574,6 +575,7 @@ cdef class OctreeContainer: cdef OctAllocationContainer *cont = self.domains.get_cont(curdom - 1) cdef int initial = cont.n_assigned cdef int in_boundary = 0 + parent = NULL # How do we bootstrap ourselves? 
for p in range(no): #for every oct we're trying to add find the @@ -606,10 +608,12 @@ cdef class OctreeContainer: ind[i] = 1 cp[i] += dds[i]/2.0 # Check if it has not been allocated + parent = cur cur = self.next_child(curdom, ind, cur) # Now we should be at the right level cur.domain = curdom cur.file_ind = p + cur.parent = parent return cont.n_assigned - initial + nb def allocate_domains(self, domain_counts): @@ -734,7 +738,10 @@ cdef class OctreeContainer: source = source_fields[key] for i in range(levels.shape[0]): if levels[i] != level: continue - dest[i + offset] = source[file_inds[i], cell_inds[i]] + if file_inds[i] < 0: + dest[i + offset] = np.nan + else: + dest[i + offset] = source[file_inds[i], cell_inds[i]] def finalize(self): cdef SelectorObject selector = selection_routines.AlwaysSelector(None) diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 18493856878..675bacdf8c5 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -145,11 +145,15 @@ cdef inline int cind(int i, int j, int k) nogil: from oct_container cimport OctreeContainer +cdef class StoreIndex(OctVisitor): + cdef np.int64_t[:,:,:,:] cell_inds + # cimport oct_container cdef class BaseNeighbourVisitor(OctVisitor): cdef int idim # 0,1,2 for x,y,z cdef int direction # +1 for +x, -1 for -x cdef np.int8_t[:] neigh_ind + cdef bint other_oct cdef Oct *neighbour cdef OctreeContainer octree cdef OctInfo oi @@ -161,8 +165,9 @@ cdef class BaseNeighbourVisitor(OctVisitor): cdef int d = (1 << self.oref) return (((self.neigh_ind[2]*d)+self.neigh_ind[1])*d+self.neigh_ind[0]) -# # # cdef class NeighbourVisitor(BaseNeighbourVisitor): -# # # cdef np.int64_t[:,:,:,:] neigh_cell_inds +cdef class NeighbourVisitor(BaseNeighbourVisitor): + cdef np.int64_t[:,:,:,:] cell_inds + cdef np.int64_t[:,:,:,:] neigh_cell_inds cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): cdef np.uint8_t[:] shifted_levels diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 3d4d639a977..18d5dc64042 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -337,6 +337,15 @@ cdef class MortonIndexOcts(OctVisitor): np.uint64(coord[2])) self.index += 1 +cdef class StoreIndex(OctVisitor): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + if not selected: return + self.cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = self.index + + self.index += 1 cdef class BaseNeighbourVisitor(OctVisitor): def __init__(self, OctreeContainer octree, int domain_id = -1): @@ -358,7 +367,7 @@ cdef class BaseNeighbourVisitor(OctVisitor): for i in range(3): c = ((self.pos[i] << self.oref) + self.ind[i]) if i == self.idim: - fcoords[i] = (c + 0.5 + self.direction) * dx / self.octree.nn[i] + fcoords[i] = (c + 0.5 + 2*self.direction) * dx / self.octree.nn[i] else: fcoords[i] = (c + 0.5) * dx / self.octree.nn[i] @@ -384,6 +393,12 @@ cdef class BaseNeighbourVisitor(OctVisitor): cdef void get_neighbour_cell_index(self, Oct* o, np.uint8_t selected): cdef int i cdef bint other_oct # True if the neighbouring cell lies in another oct + cdef np.int64_t cell_ind + + # Compute information about neighbour once per oct + if self.last != o.domain_ind: + self.set_neighbour_oct(o) + self.last = o.domain_ind # Note that we provide an index even if the cell is not selected. 
# if selected == 0: return -1 @@ -393,29 +408,48 @@ cdef class BaseNeighbourVisitor(OctVisitor): self.neigh_ind[i] = (self.ind[i] + self.direction) other_oct = self.neigh_ind[i] < 0 or self.neigh_ind[i] > 1 if other_oct: - self.neigh_ind[i] %= 2 + # trick here: we want modulo with positive remainder, but neigh_ind may be negative so cast + # it to unsigned int *before* applying modulo. + self.neigh_ind[i] = (self.neigh_ind[i]) % 2 else: self.neigh_ind[i] = self.ind[i] - if not other_oct: - # Simple case: the neighbouring cell is within the oct - self.neighbour = o - else: - # Complicated case: the cell is in a neighbouring oct - if self.last != o.domain_ind: - self.set_neighbour_oct(o) - self.last = o.domain_ind - + self.other_oct = other_oct + if other_oct: if self.neighbour != NULL: if self.oi.level == self.level - 1: # Position within neighbouring oct is stored in oi.ipos for i in range(3): self.neigh_ind[i] = self.oi.ipos[i] - if not ( 0<= self.oi.ipos[i] <= 1): - print('WHAAAAT?!', self.oi.ipos[i]) elif self.oi.level != self.level: print('This should not happen! %s %s' % (self.oi.level, self.level)) - return + self.neighbour = NULL + +cdef class NeighbourVisitor(BaseNeighbourVisitor): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + cdef np.int64_t cell_ind + cdef Oct *neighbour_oct + cdef bint ok + + self.get_neighbour_cell_index(o, selected) + if not self.other_oct: + neighbour_oct = o + ok = True + elif self.neighbour != NULL: + neighbour_oct = self.neighbour + ok = True + else: + ok = False + + if ok: + cell_ind = self.cell_inds[neighbour_oct.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] + else: + cell_ind = -1 + + self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): @cython.boundscheck(False) @@ -425,7 +459,7 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): cdef np.int64_t neigbour_cell_index if selected == 0: return # Note: only selected items have an index - neighbour_index = self.get_neighbour_cell_index(o, selected) + self.get_neighbour_cell_index(o, selected) self.shifted_levels[self.index] = self.level if self.neighbour != NULL: # Note: we store the local level, not the remote one @@ -434,4 +468,4 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): else: self.shifted_file_inds[self.index] = -1 self.shifted_cell_inds[self.index] = 255 # -1 on uint8 - self.index += 1 \ No newline at end of file + self.index += 1 diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index edf6e60b8d0..8f471775f11 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -1,5 +1,5 @@ cimport cython -from oct_visitors cimport FillFileIndicesRNeighbour +from oct_visitors cimport FillFileIndicesRNeighbour, StoreIndex, NeighbourVisitor from selection_routines cimport SelectorObject, AlwaysSelector cimport numpy as np import numpy as np @@ -22,34 +22,51 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): bint periodicity[3]): pass - # def neighbours_in_direction(self, int idim, int direction, SelectorObject selector = AlwaysSelector(None)): - # """Return index on file of all neighbours in a given direction""" - # cdef SelectorObject always_selector = AlwaysSelector(None) + def fill_index(self, SelectorObject selector = AlwaysSelector(None)): + # Get the on-file index of each cell + cdef 
StoreIndex visitor - # cdef int num_cells = selector.count_oct_cells(self, -1) + cdef np.int64_t[:, :, :, :] cell_inds, - # # Get the on-file index of each cell - # cdef FillFileIndices visitor - # cdef np.ndarray[np.int64_t, ndim=4] cell_inds = np.zeros((self.nocts, 2, 2, 2), dtype="int64") + cell_inds = np.full((self.nocts, 2, 2, 2), -1, dtype=np.int64) - # visitor = FillFileIndices(self, -1) - # visitor.cell_inds = cell_inds + visitor = StoreIndex(self, -1) + visitor.cell_inds = cell_inds - # self.visit_all_octs(selector, visitor) - - # # Store the index of the neighbour - # cdef NeighbourVisitor n_visitor - # cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.empty_like(cell_inds) - # n_visitor = NeighbourVisitor(self, -1) - # n_visitor.idim = idim - # n_visitor.direction = direction - # n_visitor.cell_inds = cell_inds - # n_visitor.neigh_cell_inds = neigh_cell_inds - # n_visitor.octree = self - # n_visitor.last = -1 - # self.visit_all_octs(always_selector, n_visitor) + self.visit_all_octs(selector, visitor) - # return np.asarray(cell_inds), np.asarray(neigh_cell_inds) + return np.asarray(cell_inds) + + def neighbours_in_direction(self, int idim, int direction, + np.int64_t[:, :, :, :] cell_inds): + """Return index on file of all neighbours in a given direction""" + cdef SelectorObject always_selector = AlwaysSelector(None) + + # Store the index of the neighbour + cdef NeighbourVisitor n_visitor + cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.full_like(cell_inds, -1) + n_visitor = NeighbourVisitor(self, -1) + n_visitor.idim = idim + n_visitor.direction = direction + n_visitor.cell_inds = cell_inds + n_visitor.neigh_cell_inds = neigh_cell_inds + n_visitor.octree = self + n_visitor.last = -1 + self.visit_all_octs(always_selector, n_visitor) + + return np.asarray(neigh_cell_inds) + + #@cython.boundscheck(False) + @cython.wraparound(False) + def copy_neighbour_data(self, + np.int64_t[:] icell, np.int64_t[:] nicell, + np.float64_t[:, :] input, np.float64_t[:, :] output, + int N,): + cdef int i + + for i in range(N): + if nicell[i] > -1 and icell[i] > -1: + output[icell[i], :] = input[nicell[i], :] def file_index_octs_with_shift(self, SelectorObject selector, int domain_id, int idim, int direction, int num_cells = -1): @@ -83,9 +100,7 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): raise NotImplementedError('C-style filling with spatial offset has not been implemented.') else: raise RuntimeError - print('visiting all octs') self.visit_all_octs(selector, neigh_visitor) - print('visited') return shifted_levels, shifted_cell_inds, shifted_file_inds def file_index_octs(self, SelectorObject selector, int domain_id, From bc077f4f23cd1c6d742ec5119b344daf7158bdec Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 23 Jan 2020 16:53:40 +0000 Subject: [PATCH 010/653] Fix octree accessor --- yt/geometry/oct_visitors.pyx | 25 +++++++++++++++++-------- 1 file changed, 17 insertions(+), 8 deletions(-) diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 18d5dc64042..d294575528b 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -456,16 +456,25 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): @cython.wraparound(False) @cython.initializedcheck(False) cdef void visit(self, Oct* o, np.uint8_t selected): - cdef np.int64_t neigbour_cell_index + cdef np.int64_t neigh_file_ind + cdef np.uint8_t neigh_cell_ind + if selected == 0: return # Note: only selected items have an index self.get_neighbour_cell_index(o, 
selected) - self.shifted_levels[self.index] = self.level - if self.neighbour != NULL: - # Note: we store the local level, not the remote one - self.shifted_file_inds[self.index] = self.neighbour.file_ind - self.shifted_cell_inds[self.index] = self.neighbour_rind() + if not self.other_oct: + neigh_file_ind = o.file_ind + neigh_cell_ind = self.neighbour_rind() + elif self.neighbour != NULL: + neigh_file_ind = self.neighbour.file_ind + neigh_cell_ind = self.neighbour_rind() else: - self.shifted_file_inds[self.index] = -1 - self.shifted_cell_inds[self.index] = 255 # -1 on uint8 + neigh_file_ind = -1 + neigh_cell_ind = 255 + + self.shifted_levels[self.index] = self.level + # Note: we store the local level, not the remote one + self.shifted_file_inds[self.index] = neigh_file_ind + self.shifted_cell_inds[self.index] = neigh_cell_ind + self.index += 1 From 266ac1fb9dab1dd8644e3bcb41bf10c6cc994345 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 17 Jun 2020 09:17:30 +0100 Subject: [PATCH 011/653] Forget about nan when doing gradients --- yt/fields/fluid_fields.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index 0a213e43409..f62881a0747 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -65,7 +65,7 @@ def _cell_mass(field, data): def _sound_speed(field, data): tr = data.ds.gamma * data[ftype, "pressure"] / data[ftype, "density"] return np.sqrt(tr) - + registry.add_field((ftype, "sound_speed"), sampling_type="local", function=_sound_speed, @@ -75,7 +75,7 @@ def _radial_mach_number(field, data): """ Radial component of M{|v|/c_sound} """ tr = data[ftype, "radial_velocity"] / data[ftype, "sound_speed"] return np.abs(tr) - + registry.add_field((ftype, "radial_mach_number"), sampling_type="local", function=_radial_mach_number, @@ -93,7 +93,7 @@ def _kin_energy(field, data): def _mach_number(field, data): """ M{|v|/c_sound} """ return data[ftype, "velocity_magnitude"] / data[ftype, "sound_speed"] - + registry.add_field((ftype, "mach_number"), sampling_type="local", function=_mach_number, @@ -213,7 +213,7 @@ def func(field, data): f = data[grad_field][slice_3dr]/ds[slice_3d] f -= data[grad_field][slice_3dl]/ds[slice_3d] new_field = np.zeros_like(data[grad_field], dtype=np.float64) - new_field = data.ds.arr(new_field, f.units) + new_field = data.ds.arr(new_field, vr.units / ds.units) new_field[slice_3d] = f return new_field return func From 0d8bf3acd37a5614661e668b10af351eefc6dcc9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 23 Jan 2020 16:54:02 +0000 Subject: [PATCH 012/653] Accept unyt_array as center --- yt/geometry/coordinates/coordinate_handler.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/geometry/coordinates/coordinate_handler.py b/yt/geometry/coordinates/coordinate_handler.py index 6e7f2f00bd0..b674cada4ce 100644 --- a/yt/geometry/coordinates/coordinate_handler.py +++ b/yt/geometry/coordinates/coordinate_handler.py @@ -7,7 +7,7 @@ fix_unitary, \ iterable from yt.units.yt_array import \ - YTArray, YTQuantity + YTArray, YTQuantity, unyt_array from yt.utilities.exceptions import \ YTCoordinateNotImplemented, \ YTInvalidWidthError @@ -213,7 +213,7 @@ def sanitize_width(self, axis, width, depth): width = (w[0], w[1]) elif iterable(width): width = validate_iterable_width(width, self.ds) - elif isinstance(width, YTQuantity): + elif isinstance(width, (YTQuantity, unyt_array)): width = (width, width) elif isinstance(width, Number): width = 
(self.ds.quan(width, 'code_length'), From 59923e22e2648ecc895527979ba9332c8dab5388 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 17 Jun 2020 09:18:29 +0100 Subject: [PATCH 013/653] Gradient should be block order-aware --- yt/fields/fluid_fields.py | 13 +++++++++++-- 1 file changed, 11 insertions(+), 2 deletions(-) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index f62881a0747..5966f8612ff 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -205,16 +205,25 @@ def grad_func(axi, ax): slice_3dl = slice_3d[:axi] + (sl_left,) + slice_3d[axi+1:] slice_3dr = slice_3d[:axi] + (sl_right,) + slice_3d[axi+1:] def func(field, data): + block_reorder = getattr(data, '_block_reorder', 'C') + if block_reorder == 'F': + field_data = data[grad_field].swapaxes(0, 2) + else: + field_data = data[grad_field] ds = div_fac * data[ftype, "d%s" % ax] if ax == "theta": ds *= data[ftype, "r"] if ax == "phi": ds *= data[ftype, "r"] * np.sin(data[ftype, "theta"]) - f = data[grad_field][slice_3dr]/ds[slice_3d] - f -= data[grad_field][slice_3dl]/ds[slice_3d] + f = field_data[slice_3dr]/ds[slice_3d] + f -= field_data[slice_3dl]/ds[slice_3d] new_field = np.zeros_like(data[grad_field], dtype=np.float64) new_field = data.ds.arr(new_field, vr.units / ds.units) new_field[slice_3d] = f + + if block_reorder: + new_field = new_field.swapaxes(0, 2) + return new_field return func From 6d091d5277c8318563e693fff76e7ef5a16eef90 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 23 Jan 2020 18:43:46 +0000 Subject: [PATCH 014/653] Add comments --- yt/geometry/oct_visitors.pyx | 3 +++ 1 file changed, 3 insertions(+) diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index d294575528b..00c183cc199 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -337,6 +337,7 @@ cdef class MortonIndexOcts(OctVisitor): np.uint64(coord[2])) self.index += 1 +# Store cell index cdef class StoreIndex(OctVisitor): @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) cdef void visit(self, Oct* o, np.uint8_t selected): @@ -425,6 +426,7 @@ cdef class BaseNeighbourVisitor(OctVisitor): print('This should not happen! 
%s %s' % (self.oi.level, self.level)) self.neighbour = NULL +# Store neighbouring cell index in current cell cdef class NeighbourVisitor(BaseNeighbourVisitor): @cython.boundscheck(False) @cython.wraparound(False) @@ -451,6 +453,7 @@ cdef class NeighbourVisitor(BaseNeighbourVisitor): self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind +# Store file position + cell of neighbouring cell in current cell cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): @cython.boundscheck(False) @cython.wraparound(False) From 08fe5a91e90ac58ba45b6efcacda0c9fef0b82e4 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 23 Jan 2020 18:44:04 +0000 Subject: [PATCH 015/653] Support reading boundaries between CPUs --- yt/frontends/ramses/data_structures.py | 17 ++++--- yt/frontends/ramses/io_utils.pyx | 67 +++++++++++++------------- 2 files changed, 44 insertions(+), 40 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index b50a3d76f28..6c729231636 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -213,8 +213,9 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler): # Initializing data container for field in fields: tr[field] = np.zeros(cell_count, 'float64') - fill_hydro(fd, file_handler.offset, - file_handler.level_count, levels, cell_inds, + fill_hydro(fd, file_handler.offset[self.domain_id-1:self.domain_id], + file_handler.level_count[self.domain_id-1:self.domain_id], + levels, cell_inds, file_inds, ndim, all_fields, fields, tr, oct_handler) return tr @@ -251,10 +252,13 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo for field in fields: tr[field][:] = 0 - fill_hydro(fd, file_handler.offset, - file_handler.level_count, levels, cell_inds, - file_inds, ndim, all_fields, fields, tr, - oct_handler) + fill_hydro( + fd, + file_handler.offset, file_handler.level_count, + levels, cell_inds, + file_inds, ndim, all_fields, fields, tr, + oct_handler + ) _slice = tuple( [slice(None)] + [slice(i, i+self._num_zones) for i in ishift_all]) @@ -325,7 +329,6 @@ def retrieve_ghost_zones(self, ngz, fields, smoothed=False): return new_subset - class RAMSESIndex(OctreeIndex): def __init__(self, ds, dataset_type='ramses'): diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 12fc0918f33..9414c2bf352 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -83,7 +83,7 @@ def read_amr(FortranFile f, dict headers, @cython.nonecheck(False) cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t nvar, dict headers): - cdef np.ndarray[np.int64_t, ndim=1] offset, level_count + cdef np.ndarray[np.int64_t, ndim=2] offset, level_count cdef INT64_t ndim, twotondim, nlevelmax, n_levels, nboundary, ncpu, ncpu_and_bound cdef INT64_t ilevel, icpu, skip_len cdef INT32_t file_ilevel, file_ncache @@ -101,12 +101,11 @@ cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t n skip_len = twotondim * nvar # It goes: level, CPU, 8-variable (1 oct) - offset = np.zeros(n_levels, dtype=np.int64) - offset -= 1 - level_count = np.zeros(n_levels, dtype=np.int64) + offset = np.full((ncpu, n_levels), -1, dtype=np.int64) + level_count = np.zeros((ncpu, n_levels), dtype=np.int64) - cdef np.int64_t[:] level_count_view = level_count - cdef np.int64_t[:] offset_view = offset + cdef np.int64_t[:,:] level_count_view = level_count + cdef np.int64_t[:,:] 
offset_view = offset for ilevel in range(nlevelmax): for icpu in range(ncpu_and_bound): @@ -121,9 +120,9 @@ cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t n 'from data (%s) is not coherent with the expected (%s)', f.name, file_ilevel, ilevel) - if icpu + 1 == domain_id and ilevel >= min_level: - offset[ilevel - min_level] = f.tell() - level_count[ilevel - min_level] = file_ncache + if ilevel >= min_level: + offset_view[icpu, ilevel - min_level] = f.tell() + level_count_view[icpu, ilevel - min_level] = file_ncache f.skip(skip_len) return offset, level_count @@ -133,45 +132,47 @@ cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t n @cython.cdivision(True) @cython.nonecheck(False) def fill_hydro(FortranFile f, - np.ndarray[np.int64_t, ndim=1] offsets, - np.ndarray[np.int64_t, ndim=1] level_count, + np.ndarray[np.int64_t, ndim=2] offsets, + np.ndarray[np.int64_t, ndim=2] level_count, np.ndarray[np.uint8_t, ndim=1] levels, np.ndarray[np.uint8_t, ndim=1] cell_inds, np.ndarray[np.int64_t, ndim=1] file_inds, INT64_t ndim, list all_fields, list fields, dict tr, RAMSESOctreeContainer oct_handler): - cdef INT64_t ilevel, ifield, nfields, noffset cdef INT64_t offset cdef dict tmp cdef str field - cdef INT64_t twotondim, i + cdef INT64_t twotondim + cdef int ilevel, icpu, ifield, nfields, noffset, nc cdef np.ndarray[np.uint8_t, ndim=1] mask twotondim = 2**ndim nfields = len(all_fields) - noffset = len(offsets) + ncpu = offsets.shape[0] + noffset = offsets.shape[1] mask = np.array([(field in fields) for field in all_fields], dtype=np.uint8) # Loop over levels for ilevel in range(noffset): - offset = offsets[ilevel] - if offset == -1: - continue - f.seek(offset) - nc = level_count[ilevel] - tmp = {} - # Initalize temporary data container for io - for field in all_fields: - tmp[field] = np.empty((nc, twotondim), dtype="float64") - - for i in range(twotondim): - # Read the selected fields - for ifield in range(nfields): - if not mask[ifield]: - f.skip() - else: - tmp[all_fields[ifield]][:, i] = f.read_vector('d') # i-th cell - - oct_handler.fill_level(ilevel, levels, cell_inds, file_inds, tr, tmp) + for icpu in range(ncpu): + offset = offsets[icpu, ilevel] + if offset == -1: + continue + f.seek(offset) + nc = level_count[icpu, ilevel] + tmp = {} + # Initalize temporary data container for io + for field in all_fields: + tmp[field] = np.empty((nc, twotondim), dtype="float64") + + for i in range(twotondim): + # Read the selected fields + for ifield in range(nfields): + if not mask[ifield]: + f.skip() + else: + tmp[all_fields[ifield]][:, i] = f.read_vector('d') # i-th cell + + oct_handler.fill_level(ilevel, levels, cell_inds, file_inds, tr, tmp) From d6b508b4df10315caf1c0d4e4ba5ef8166708f4e Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 28 Jan 2020 09:02:22 +0000 Subject: [PATCH 016/653] Filling hydro with boundary information --- yt/frontends/ramses/data_structures.py | 31 +++++++++++++++++---- yt/frontends/ramses/io_utils.pyx | 19 +++++++++---- yt/geometry/oct_container.pyx | 4 +-- yt/geometry/oct_visitors.pxd | 1 + yt/geometry/oct_visitors.pyx | 9 ++++-- yt/geometry/ramses_oct_container.pyx | 38 +++++++++++++++++++++++--- 6 files changed, 83 insertions(+), 19 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 6c729231636..55776805fe6 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -213,8 +213,9 @@ def _fill_no_ghostzones(self, 
fd, fields, selector, file_handler): # Initializing data container for field in fields: tr[field] = np.zeros(cell_count, 'float64') - fill_hydro(fd, file_handler.offset[self.domain_id-1:self.domain_id], - file_handler.level_count[self.domain_id-1:self.domain_id], + fill_hydro(fd, file_handler.offset, + file_handler.level_count, + [self.domain_id-1], levels, cell_inds, file_inds, ndim, all_fields, fields, tr, oct_handler) @@ -228,10 +229,19 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo all_fields = [f for ft, f in file_handler.field_list] fields = [f for ft, f in fields] - oct_count = selector.count_octs(self.oct_handler, self.domain_id) + # Select all cells, including those in *other domain* + selector = OctreeSubsetSelector(self) + selector_with_edge = OctreeSubsetSelector(self) + selector_with_edge.domain_id = -1 + + oct_count = selector_with_edge.count_octs(self.oct_handler, -1) cell_count = oct_count * self._num_zones**ndim iwidth = self._num_zones + num_ghost_zones * 2 + oct_count0 = selector.count_octs(self.oct_handler, self.domain_id) + cell_count0 = oct_count0 * self._num_zones**ndim + ncpus = file_handler.offset.shape[0] + tr = {} tr_all = {} for field in fields: @@ -245,9 +255,15 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo ishift_all[idim] = num_ghost_zones + shift if shift == 0: continue - levels, cell_inds, file_inds = self.oct_handler.file_index_octs_with_shift( - selector, self.domain_id, idim, shift, cell_count) + levels0, cell_inds0, file_inds0, domain0 = \ + self.oct_handler.file_index_octs_with_shift( + selector, self.domain_id, idim, shift, cell_count0) + + levels, cell_inds, file_inds, domain = \ + self.oct_handler.file_index_octs_with_shift( + selector_with_edge, -1, idim, shift, cell_count) + # import ipdb; ipdb.set_trace() # Initializing data container for field in fields: tr[field][:] = 0 @@ -255,9 +271,12 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo fill_hydro( fd, file_handler.offset, file_handler.level_count, + #file_handler.offset, file_handler.level_count, + list(range(ncpus)), levels, cell_inds, file_inds, ndim, all_fields, fields, tr, - oct_handler + oct_handler, + domains=domain.astype(np.int64) ) _slice = tuple( [slice(None)] + diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 9414c2bf352..0993668b0b2 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -134,29 +134,34 @@ cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t n def fill_hydro(FortranFile f, np.ndarray[np.int64_t, ndim=2] offsets, np.ndarray[np.int64_t, ndim=2] level_count, + list cpu_enumerator, np.ndarray[np.uint8_t, ndim=1] levels, np.ndarray[np.uint8_t, ndim=1] cell_inds, np.ndarray[np.int64_t, ndim=1] file_inds, INT64_t ndim, list all_fields, list fields, dict tr, - RAMSESOctreeContainer oct_handler): + RAMSESOctreeContainer oct_handler, + np.ndarray[np.int64_t, ndim=1] domains=np.array([], dtype='int64'), + int domain=-1): cdef INT64_t offset cdef dict tmp cdef str field cdef INT64_t twotondim - cdef int ilevel, icpu, ifield, nfields, noffset, nc + cdef int ilevel, icpu, ifield, nfields, noffset, nc, ncpu_selected cdef np.ndarray[np.uint8_t, ndim=1] mask twotondim = 2**ndim nfields = len(all_fields) ncpu = offsets.shape[0] noffset = offsets.shape[1] + ncpu_selected = len(cpu_enumerator) mask = np.array([(field in fields) for field in all_fields], dtype=np.uint8) # Loop over 
levels for ilevel in range(noffset): - for icpu in range(ncpu): + # Loop over cpu domains + for icpu in cpu_enumerator: offset = offsets[icpu, ilevel] if offset == -1: continue @@ -174,5 +179,9 @@ def fill_hydro(FortranFile f, f.skip() else: tmp[all_fields[ifield]][:, i] = f.read_vector('d') # i-th cell - - oct_handler.fill_level(ilevel, levels, cell_inds, file_inds, tr, tmp) + if ncpu_selected > 1: + oct_handler.fill_level_with_domain( + ilevel, levels, cell_inds, file_inds, domains, tr, tmp, domain=icpu) + else: + oct_handler.fill_level( + ilevel, levels, cell_inds, file_inds, tr, tmp) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 73d349547f7..02f00f19437 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -669,9 +669,9 @@ cdef class OctreeContainer: file_inds = np.zeros(num_cells, dtype="int64") cell_inds = np.zeros(num_cells, dtype="uint8") for i in range(num_cells): - levels[i] = 100 + levels[i] = 255 file_inds[i] = -1 - cell_inds[i] = 9 + cell_inds[i] = 8 cdef oct_visitors.FillFileIndicesO visitor_o cdef oct_visitors.FillFileIndicesR visitor_r if self.fill_style == "r": diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 675bacdf8c5..5e0f8a395c3 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -173,4 +173,5 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): cdef np.uint8_t[:] shifted_levels cdef np.int64_t[:] shifted_file_inds cdef np.uint8_t[:] shifted_cell_inds + cdef np.int32_t[:] neigh_domain diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 00c183cc199..a2cc19f6e2a 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -461,23 +461,28 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): cdef void visit(self, Oct* o, np.uint8_t selected): cdef np.int64_t neigh_file_ind cdef np.uint8_t neigh_cell_ind + cdef np.int32_t neigh_domain if selected == 0: return # Note: only selected items have an index self.get_neighbour_cell_index(o, selected) if not self.other_oct: - neigh_file_ind = o.file_ind + neigh_domain = 0 + neigh_file_ind = o.domain neigh_cell_ind = self.neighbour_rind() elif self.neighbour != NULL: + neigh_domain = self.neighbour.domain neigh_file_ind = self.neighbour.file_ind neigh_cell_ind = self.neighbour_rind() else: + neigh_domain = -1 neigh_file_ind = -1 - neigh_cell_ind = 255 + neigh_cell_ind = 8 self.shifted_levels[self.index] = self.level # Note: we store the local level, not the remote one self.shifted_file_inds[self.index] = neigh_file_ind self.shifted_cell_inds[self.index] = neigh_cell_ind + self.neigh_domain[self.index] = neigh_domain self.index += 1 diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index 8f471775f11..bc001b2e836 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -81,9 +81,11 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): cdef np.ndarray[np.uint8_t, ndim=1] shifted_levels cdef np.ndarray[np.uint8_t, ndim=1] shifted_cell_inds cdef np.ndarray[np.int64_t, ndim=1] shifted_file_inds - shifted_levels = np.zeros(num_cells, dtype="uint8") - shifted_file_inds = np.zeros(num_cells, dtype="int64") - shifted_cell_inds = np.zeros(num_cells, dtype="uint8") + cdef np.ndarray[np.int32_t, ndim=1] neigh_domain + shifted_levels = np.full(num_cells, 255, dtype="uint8") + shifted_file_inds = np.full(num_cells, -1, dtype="int64") + shifted_cell_inds = np.full(num_cells, 8, 
dtype="uint8") + neigh_domain = np.full(num_cells, -1, dtype="int32") if self.fill_style == "r": neigh_visitor = FillFileIndicesRNeighbour(self, domain_id) @@ -91,6 +93,7 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): neigh_visitor.shifted_levels = shifted_levels neigh_visitor.shifted_file_inds = shifted_file_inds neigh_visitor.shifted_cell_inds = shifted_cell_inds + neigh_visitor.neigh_domain = neigh_domain # direction to explore and extra parameters of the visitor neigh_visitor.idim = idim neigh_visitor.direction = direction @@ -101,7 +104,7 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): else: raise RuntimeError self.visit_all_octs(selector, neigh_visitor) - return shifted_levels, shifted_cell_inds, shifted_file_inds + return shifted_levels, shifted_cell_inds, shifted_file_inds, neigh_domain def file_index_octs(self, SelectorObject selector, int domain_id, num_cells = -1, spatial_offset=(0, 0, 0)): @@ -129,3 +132,30 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): else: return self.file_index_octs_with_shift( selector, domain_id, idim, direction, num_cells) + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def fill_level_with_domain( + self, int level, + np.uint8_t[:] levels, + np.uint8_t[:] cell_inds, + np.int64_t[:] file_inds, + np.int64_t[:] domains, + dest_fields, source_fields, + np.int64_t domain, + np.int64_t offset = 0 + ): + cdef np.ndarray[np.float64_t, ndim=2] source + cdef np.ndarray[np.float64_t, ndim=1] dest + cdef int i + cdef str key + for key in dest_fields: + dest = dest_fields[key] + source = source_fields[key] + for i in range(levels.shape[0]): + if levels[i] != level or domains[i] != domain: continue + if file_inds[i] < 0: + dest[i + offset] = np.nan + else: + dest[i + offset] = source[file_inds[i], cell_inds[i]] From fb6462aa2309cb0e0f373de9ee8ac4a73acd3582 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 09:10:45 +0000 Subject: [PATCH 017/653] Use Fortran ordering to read in for faster results --- yt/frontends/ramses/io_utils.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 0993668b0b2..0cb1398ea72 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -170,7 +170,7 @@ def fill_hydro(FortranFile f, tmp = {} # Initalize temporary data container for io for field in all_fields: - tmp[field] = np.empty((nc, twotondim), dtype="float64") + tmp[field] = np.empty((nc, twotondim), dtype="float64", order='F') for i in range(twotondim): # Read the selected fields From d23b9a1649ec9ef2413d70803d25d4a5babe8fc1 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 12:57:04 +0000 Subject: [PATCH 018/653] Working version? This is not crashing and there are very few glitches. 
--- yt/frontends/ramses/data_structures.py | 67 +++---------- yt/frontends/ramses/io_utils.pyx | 13 +-- yt/geometry/oct_container.pyx | 2 +- yt/geometry/oct_visitors.pxd | 5 + yt/geometry/oct_visitors.pyx | 124 ++++++++++++++++++++++++- yt/geometry/ramses_oct_container.pyx | 102 ++++++++++---------- 6 files changed, 198 insertions(+), 115 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 55776805fe6..7f2dd60c51a 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -228,65 +228,24 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo oct_handler = self.oct_handler all_fields = [f for ft, f in file_handler.field_list] fields = [f for ft, f in fields] + tr = {} - # Select all cells, including those in *other domain* - selector = OctreeSubsetSelector(self) - selector_with_edge = OctreeSubsetSelector(self) - selector_with_edge.domain_id = -1 - - oct_count = selector_with_edge.count_octs(self.oct_handler, -1) - cell_count = oct_count * self._num_zones**ndim - iwidth = self._num_zones + num_ghost_zones * 2 + cell_count = selector.count_octs(self.oct_handler, self.domain_id) * self.nz**ndim - oct_count0 = selector.count_octs(self.oct_handler, self.domain_id) - cell_count0 = oct_count0 * self._num_zones**ndim - ncpus = file_handler.offset.shape[0] + levels, cell_inds, file_inds, domains = self.oct_handler.compute_domain_mapper( + selector, self.domain_id, cell_count) - tr = {} - tr_all = {} + # Initializing data container for field in fields: - tr_all[field] = np.full((oct_count, iwidth, iwidth, iwidth), np.nan, 'float64') tr[field] = np.zeros(cell_count, 'float64') - - # Compute the index to read with a positive and negative shift in all dimensions - for idim in range(ndim): - ishift_all = [num_ghost_zones]*ndim - for shift in range(-num_ghost_zones, num_ghost_zones+1, 2): - ishift_all[idim] = num_ghost_zones + shift - if shift == 0: - continue - levels0, cell_inds0, file_inds0, domain0 = \ - self.oct_handler.file_index_octs_with_shift( - selector, self.domain_id, idim, shift, cell_count0) - - levels, cell_inds, file_inds, domain = \ - self.oct_handler.file_index_octs_with_shift( - selector_with_edge, -1, idim, shift, cell_count) - - # import ipdb; ipdb.set_trace() - # Initializing data container - for field in fields: - tr[field][:] = 0 - - fill_hydro( - fd, - file_handler.offset, file_handler.level_count, - #file_handler.offset, file_handler.level_count, - list(range(ncpus)), - levels, cell_inds, - file_inds, ndim, all_fields, fields, tr, - oct_handler, - domains=domain.astype(np.int64) - ) - _slice = tuple( - [slice(None)] + - [slice(i, i+self._num_zones) for i in ishift_all]) - for field in fields: - tr_all[field][_slice] = \ - tr[field].reshape(oct_count, 2, 2, 2) - for field in fields: - tr_all[field] = tr_all[field].reshape(-1) - return tr_all + fill_hydro(fd, file_handler.offset, + file_handler.level_count, + [self.domain_id-1], + levels, cell_inds, + file_inds, ndim, all_fields, fields, tr, + oct_handler, + domains=domains) + return tr @property def fwidth(self): diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 0cb1398ea72..fb74ed48216 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -141,32 +141,33 @@ def fill_hydro(FortranFile f, INT64_t ndim, list all_fields, list fields, dict tr, RAMSESOctreeContainer oct_handler, - np.ndarray[np.int64_t, ndim=1] domains=np.array([], 
dtype='int64'), - int domain=-1): + np.ndarray[np.int32_t, ndim=1] domains=np.array([], dtype='int32')): cdef INT64_t offset cdef dict tmp cdef str field cdef INT64_t twotondim - cdef int ilevel, icpu, ifield, nfields, noffset, nc, ncpu_selected + cdef int ilevel, icpu, ifield, nfields, nlevels, nc, ncpu_selected cdef np.ndarray[np.uint8_t, ndim=1] mask twotondim = 2**ndim nfields = len(all_fields) ncpu = offsets.shape[0] - noffset = offsets.shape[1] + nlevels = offsets.shape[1] ncpu_selected = len(cpu_enumerator) mask = np.array([(field in fields) for field in all_fields], dtype=np.uint8) # Loop over levels - for ilevel in range(noffset): + for ilevel in range(nlevels): # Loop over cpu domains for icpu in cpu_enumerator: + nc = level_count[icpu, ilevel] + if nc == 0: + continue offset = offsets[icpu, ilevel] if offset == -1: continue f.seek(offset) - nc = level_count[icpu, ilevel] tmp = {} # Initalize temporary data container for io for field in all_fields: diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 02f00f19437..437462c333f 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -548,7 +548,7 @@ cdef class OctreeContainer: def domain_ind(self, selector, int domain_id = -1): cdef np.ndarray[np.int64_t, ndim=1] ind # Here's where we grab the masked items. - ind = np.zeros(self.nocts, 'int64') - 1 + ind = np.full(self.nocts, -1, 'int64') cdef oct_visitors.IndexOcts visitor visitor = oct_visitors.IndexOcts(self, domain_id) visitor.oct_index = ind diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 5e0f8a395c3..1616b36b943 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -175,3 +175,8 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): cdef np.uint8_t[:] shifted_cell_inds cdef np.int32_t[:] neigh_domain +cdef class NeighbourCellVisitor(BaseNeighbourVisitor): + cdef np.uint8_t[:] shifted_levels + cdef np.int64_t[:] shifted_file_inds + cdef np.uint8_t[:] shifted_cell_inds + cdef np.int32_t[:] neigh_domain \ No newline at end of file diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index a2cc19f6e2a..3e5c708907d 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -471,7 +471,7 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): neigh_file_ind = o.domain neigh_cell_ind = self.neighbour_rind() elif self.neighbour != NULL: - neigh_domain = self.neighbour.domain + neigh_domain = self.neighbour.domain neigh_file_ind = self.neighbour.file_ind neigh_cell_ind = self.neighbour_rind() else: @@ -486,3 +486,125 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): self.neigh_domain[self.index] = neigh_domain self.index += 1 + +# Store file position + cell of neighbouring cell in current cell +cdef class NeighbourCellNeighbourCellVisitor(BaseNeighbourVisitor): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void set_neighbour_info(self, Oct *o, int ishift[3]): + cdef int i + cdef np.float64_t c, dx + cdef np.int64_t ipos + cdef np.float64_t fcoords[3] + cdef Oct *neighbour + cdef bint local_oct + cdef bint other_oct + dx = 1.0 / ((1 << self.oref) << self.level) + local_oct = True + + # Compute position of neighbouring cell + for i in range(3): + c = (self.pos[i] << self.oref) + fcoords[i] = (c + 0.5 + ishift[i]) * dx / self.octree.nn[i] + local_oct &= (0 <= ishift[i] <= 1) + other_oct = not local_oct + + # Use octree to find neighbour + if other_oct: + 
neighbour = self.octree.get(fcoords, &self.oi, max_level=self.level) + else: + neighbour = o + self.oi.level = self.level + for i in range(3): + self.oi.ipos[i] = (self.pos[i] << self.oref) + ishift[i] + + # Extra step - compute cell position in neighbouring oct (and store in oi.ipos) + if self.oi.level == self.level - 1: + for i in range(3): + ipos = (((self.pos[i] << self.oref) + ishift[i])) >> 1 + if (self.oi.ipos[i] << 1) == ipos: + self.oi.ipos[i] = 0 + else: + self.oi.ipos[i] = 1 + self.neighbour = neighbour + + # Index of neighbouring cell within its oct + for i in range(3): + self.neigh_ind[i] = (ishift[i]) % 2 + + self.other_oct = other_oct + if other_oct: + if self.neighbour != NULL: + if self.oi.level == self.level - 1: + # Position within neighbouring oct is stored in oi.ipos + for i in range(3): + self.neigh_ind[i] = self.oi.ipos[i] + elif self.oi.level != self.level: + print('This should not happen! %s %s' % (self.oi.level, self.level)) + self.neighbour = NULL + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + cdef int i, j, k + cdef int ishift[3] + cdef np.int64_t neigh_file_ind + cdef np.uint8_t neigh_cell_ind + cdef np.int32_t neigh_domain + cdef np.uint8_t neigh_level + if selected == 0: return + # Work at oct level + if self.last == o.domain_ind: return + + self.last = o.domain_ind + + # Loop over cells in and directly around oct + for i in range(-1, 3): + ishift[0] = i + for j in range(-1, 3): + ishift[1] = j + for k in range(-1, 3): + ishift[2] = k + self.set_neighbour_info(o, ishift) + + if not self.other_oct: + neigh_level = self.level + neigh_domain = o.domain + neigh_file_ind = o.file_ind + neigh_cell_ind = self.neighbour_rind() + elif self.neighbour != NULL: + neigh_level = self.oi.level + neigh_domain = self.neighbour.domain + neigh_file_ind = self.neighbour.file_ind + neigh_cell_ind = self.neighbour_rind() + else: + neigh_level = 255 + neigh_domain = -1 + neigh_file_ind = -1 + neigh_cell_ind = 8 + + self.shifted_levels[self.index] = neigh_level + self.shifted_file_inds[self.index] = neigh_file_ind + self.shifted_cell_inds[self.index] = neigh_cell_ind + self.neigh_domain[self.index] = neigh_domain + + self.index += 1# Compute mapping from index in domain to index in domain with buffer zones +cdef class DomainMapper(BaseNeighbourVisitor): + # Intended to map all octs ids to only octs in local domain + # Should be used in conjunction with OctreeSubsetSelector + def __init__(self, OctreeContainer octree, int domain_id): + super(DomainMapper, self).__init__(octree, domain_id) + self.index_in = 0 + + #@cython.boundscheck(False) + #@cython.wraparound(False) + #@cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + if self.last != o.domain_ind: + self.last = o.domain_ind + if self.marked[self.index]: + self.mapping[self.index] = self.index_in + self.index_in += 1 + self.index += 1 diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index bc001b2e836..40a7d7d94e2 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -1,18 +1,9 @@ cimport cython -from oct_visitors cimport FillFileIndicesRNeighbour, StoreIndex, NeighbourVisitor -from selection_routines cimport SelectorObject, AlwaysSelector +from oct_visitors cimport StoreIndex, NeighbourVisitor, NeighbourCellVisitor +from selection_routines cimport SelectorObject, AlwaysSelector, OctreeSubsetSelector cimport numpy 
as np import numpy as np -# cdef class FillFileIndices(oct_visitors.OctVisitor): -# cdef np.int64_t[:,:,:,:] cell_inds -# @cython.boundscheck(False) -# @cython.wraparound(False) -# @cython.initializedcheck(False) -# cdef void visit(self, Oct* o, np.uint8_t selected): -# if selected == 0: return -# self.cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = self.index -# self.index += 1 cdef class RAMSESOctreeContainer(SparseOctreeContainer): @cython.boundscheck(False) @@ -68,44 +59,6 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): if nicell[i] > -1 and icell[i] > -1: output[icell[i], :] = input[nicell[i], :] - def file_index_octs_with_shift(self, SelectorObject selector, int domain_id, - int idim, int direction, int num_cells = -1): - """Return index on file of all neighbours in a given direction""" - # We create oct arrays of the correct size - cdef np.int64_t i - if num_cells < 0: - num_cells = selector.count_oct_cells(self, domain_id) - - # Fill value of each cell with its neighbouring value - cdef FillFileIndicesRNeighbour neigh_visitor - cdef np.ndarray[np.uint8_t, ndim=1] shifted_levels - cdef np.ndarray[np.uint8_t, ndim=1] shifted_cell_inds - cdef np.ndarray[np.int64_t, ndim=1] shifted_file_inds - cdef np.ndarray[np.int32_t, ndim=1] neigh_domain - shifted_levels = np.full(num_cells, 255, dtype="uint8") - shifted_file_inds = np.full(num_cells, -1, dtype="int64") - shifted_cell_inds = np.full(num_cells, 8, dtype="uint8") - neigh_domain = np.full(num_cells, -1, dtype="int32") - - if self.fill_style == "r": - neigh_visitor = FillFileIndicesRNeighbour(self, domain_id) - # output: level, file_ind and cell_ind of the neighbouring cells - neigh_visitor.shifted_levels = shifted_levels - neigh_visitor.shifted_file_inds = shifted_file_inds - neigh_visitor.shifted_cell_inds = shifted_cell_inds - neigh_visitor.neigh_domain = neigh_domain - # direction to explore and extra parameters of the visitor - neigh_visitor.idim = idim - neigh_visitor.direction = direction - neigh_visitor.octree = self - neigh_visitor.last = -1 - elif self.fill_style == "o": - raise NotImplementedError('C-style filling with spatial offset has not been implemented.') - else: - raise RuntimeError - self.visit_all_octs(selector, neigh_visitor) - return shifted_levels, shifted_cell_inds, shifted_file_inds, neigh_domain - def file_index_octs(self, SelectorObject selector, int domain_id, num_cells = -1, spatial_offset=(0, 0, 0)): @@ -142,20 +95,63 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): np.uint8_t[:] cell_inds, np.int64_t[:] file_inds, np.int64_t[:] domains, - dest_fields, source_fields, - np.int64_t domain, + dict dest_fields, dict source_fields, + np.int32_t domain, np.int64_t offset = 0 ): cdef np.ndarray[np.float64_t, ndim=2] source cdef np.ndarray[np.float64_t, ndim=1] dest - cdef int i + cdef np.float64_t tmp + cdef int i, count cdef str key for key in dest_fields: dest = dest_fields[key] source = source_fields[key] + count = 0 for i in range(levels.shape[0]): if levels[i] != level or domains[i] != domain: continue + count += 1 if file_inds[i] < 0: dest[i + offset] = np.nan else: - dest[i + offset] = source[file_inds[i], cell_inds[i]] + # print(f'\t{i}: Accessing source {file_inds[i]}:{cell_inds[i]} source.shape=({source.shape[0]},{source.shape[1]})') + tmp =source[file_inds[i], cell_inds[i]] + dest[i + offset] = tmp # source[file_inds[i], cell_inds[i]] + return count + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def 
file_index_octs_with_shift( + self, SelectorObject selector, int domain_id, + int num_cells = -1): + cdef np.int64_t i + cdef int num_octs + if num_cells < 0: + num_octs = selector.count_octs(self, domain_id) + num_cells = num_octs * 4**3 + cdef NeighbourCellVisitor visitor + + cdef np.ndarray[np.uint8_t, ndim=1] shifted_levels + cdef np.ndarray[np.uint8_t, ndim=1] shifted_cell_inds + cdef np.ndarray[np.int64_t, ndim=1] shifted_file_inds + cdef np.ndarray[np.int32_t, ndim=1] neigh_domain + shifted_levels = np.full(num_cells, 255, dtype="uint8") + shifted_file_inds = np.full(num_cells, -1, dtype="int64") + shifted_cell_inds = np.full(num_cells, 8, dtype="uint8") + neigh_domain = np.full(num_cells, -1, dtype="int32") + + visitor = NeighbourCellVisitor(self, -1) + # output: level, file_ind and cell_ind of the neighbouring cells + visitor.shifted_levels = shifted_levels + visitor.shifted_file_inds = shifted_file_inds + visitor.shifted_cell_inds = shifted_cell_inds + visitor.neigh_domain = neigh_domain + # direction to explore and extra parameters of the visitor + visitor.octree = self + visitor.last = -1 + + # Compute indices + self.visit_all_octs(selector, visitor) + + return shifted_levels, shifted_cell_inds, shifted_file_inds, neigh_domain \ No newline at end of file From 5859895159e44dc857f473fe81a755e630413dfe Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 12:57:38 +0000 Subject: [PATCH 019/653] Correct erroneous reference --- yt/geometry/oct_visitors.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 3e5c708907d..36474c5ac1e 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -467,8 +467,8 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): # Note: only selected items have an index self.get_neighbour_cell_index(o, selected) if not self.other_oct: - neigh_domain = 0 - neigh_file_ind = o.domain + neigh_domain = o.domain + neigh_file_ind = o.file_ind neigh_cell_ind = self.neighbour_rind() elif self.neighbour != NULL: neigh_domain = self.neighbour.domain From cac9d1968bb1a3543442bc8244cb9fe1ccc15e6f Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 12:57:56 +0000 Subject: [PATCH 020/653] Catch early bug --- yt/frontends/ramses/io_utils.pyx | 2 ++ 1 file changed, 2 insertions(+) diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index fb74ed48216..7ca7d106b9d 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -74,6 +74,8 @@ def read_amr(FortranFile f, dict headers, count_boundary = 1) if n > 0: max_level = max(ilevel - min_level, max_level) + if n != ng: + raise Exception('Expected %s octs, got %s' % (ng, n)) return max_level From dbeccb83d53209f959293e0e41a70630bba1e7a2 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 13:03:46 +0000 Subject: [PATCH 021/653] More explicit names --- yt/frontends/ramses/data_structures.py | 2 +- yt/geometry/ramses_oct_container.pyx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 7f2dd60c51a..c6258ba38ef 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -232,7 +232,7 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo cell_count = selector.count_octs(self.oct_handler, self.domain_id) * self.nz**ndim - levels, cell_inds, 
file_inds, domains = self.oct_handler.compute_domain_mapper( + levels, cell_inds, file_inds, domains = self.oct_handler.file_index_octs_with_ghost_zones( selector, self.domain_id, cell_count) # Initializing data container diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index 40a7d7d94e2..af390d68b82 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -122,7 +122,7 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - def file_index_octs_with_shift( + def file_index_octs_with_ghost_zones( self, SelectorObject selector, int domain_id, int num_cells = -1): cdef np.int64_t i From 2ddaebb0f5a02828104149ce0d9c7b0b377df512 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 13:11:16 +0000 Subject: [PATCH 022/653] Correct discrepancy between .pxd and .pyx file --- yt/geometry/oct_visitors.pxd | 4 +++- yt/geometry/oct_visitors.pyx | 2 +- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 1616b36b943..84199f27230 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -179,4 +179,6 @@ cdef class NeighbourCellVisitor(BaseNeighbourVisitor): cdef np.uint8_t[:] shifted_levels cdef np.int64_t[:] shifted_file_inds cdef np.uint8_t[:] shifted_cell_inds - cdef np.int32_t[:] neigh_domain \ No newline at end of file + cdef np.int32_t[:] neigh_domain + + cdef void set_neighbour_info(self, Oct *o, int ishift[3]) \ No newline at end of file diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 36474c5ac1e..ebb14f7cee7 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -488,7 +488,7 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): self.index += 1 # Store file position + cell of neighbouring cell in current cell -cdef class NeighbourCellNeighbourCellVisitor(BaseNeighbourVisitor): +cdef class NeighbourCellVisitor(BaseNeighbourVisitor): @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) From 3c20d534d2c6d249dc6d63856fae3e3d599e423b Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 13:19:23 +0000 Subject: [PATCH 023/653] UNSURE ABOUT THIS ONE, REMOVE ME IF YOU HAVE WEIRD RESULTS --- yt/frontends/ramses/io_utils.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 7ca7d106b9d..e666b68deb3 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -184,7 +184,7 @@ def fill_hydro(FortranFile f, tmp[all_fields[ifield]][:, i] = f.read_vector('d') # i-th cell if ncpu_selected > 1: oct_handler.fill_level_with_domain( - ilevel, levels, cell_inds, file_inds, domains, tr, tmp, domain=icpu) + ilevel, levels, cell_inds, file_inds, domains, tr, tmp, domain=icpu+1) else: oct_handler.fill_level( ilevel, levels, cell_inds, file_inds, tr, tmp) From 12657695638595058733407f13e20de8d38520e0 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 13:33:20 +0000 Subject: [PATCH 024/653] Read boundary regions as well --- yt/frontends/ramses/data_structures.py | 3 ++- yt/geometry/ramses_oct_container.pyx | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index c6258ba38ef..62b894541b8 100644 --- 
a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -223,6 +223,7 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler): def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zones): ndim = self.ds.dimensionality + ncpu = self.ds.parameters['ncpu'] # Here we get a copy of the file, which we skip through and read the # bits we want. oct_handler = self.oct_handler @@ -240,7 +241,7 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo tr[field] = np.zeros(cell_count, 'float64') fill_hydro(fd, file_handler.offset, file_handler.level_count, - [self.domain_id-1], + list(range(ncpu)), levels, cell_inds, file_inds, ndim, all_fields, fields, tr, oct_handler, diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx index af390d68b82..3d80bf4ffc5 100644 --- a/yt/geometry/ramses_oct_container.pyx +++ b/yt/geometry/ramses_oct_container.pyx @@ -94,7 +94,7 @@ cdef class RAMSESOctreeContainer(SparseOctreeContainer): np.uint8_t[:] levels, np.uint8_t[:] cell_inds, np.int64_t[:] file_inds, - np.int64_t[:] domains, + np.int32_t[:] domains, dict dest_fields, dict source_fields, np.int32_t domain, np.int64_t offset = 0 From 41f05017a9cf769a3896bc5abd05eb3a9d014e50 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 13:46:52 +0000 Subject: [PATCH 025/653] Cannot assume that keys are string by default (can be tuple) --- yt/geometry/oct_container.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 437462c333f..63d80994bb6 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -732,7 +732,7 @@ cdef class OctreeContainer: cdef np.ndarray[np.float64_t, ndim=2] source cdef np.ndarray[np.float64_t, ndim=1] dest cdef int i - cdef str key + for key in dest_fields: dest = dest_fields[key] source = source_fields[key] From a39b4e7518446da4289d23cff14820627457ddfd Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 13:58:45 +0000 Subject: [PATCH 026/653] Inform gradient about ordering --- yt/data_objects/octree_subset.py | 1 + yt/fields/fluid_fields.py | 6 +++--- yt/frontends/ramses/data_structures.py | 1 + 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index f98876c5e34..bf02807cf05 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -36,6 +36,7 @@ class OctreeSubset(YTSelectionContainer): _domain_offset = 0 _cell_count = -1 _block_reorder = None + _gradient_swap_axes = False # Set to True if one should swap the axes when computing gradient def __init__(self, base_region, domain, ds, over_refine_factor = 1, num_ghost_zones = 0): super(OctreeSubset, self).__init__(ds, None) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index 5966f8612ff..89cdf112f53 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -205,8 +205,8 @@ def grad_func(axi, ax): slice_3dl = slice_3d[:axi] + (sl_left,) + slice_3d[axi+1:] slice_3dr = slice_3d[:axi] + (sl_right,) + slice_3d[axi+1:] def func(field, data): - block_reorder = getattr(data, '_block_reorder', 'C') - if block_reorder == 'F': + reorder = getattr(data, '_gradient_swap_axes', False) + if reorder: field_data = data[grad_field].swapaxes(0, 2) else: field_data = data[grad_field] @@ -221,7 +221,7 @@ def func(field, data): new_field = data.ds.arr(new_field, 
vr.units / ds.units) new_field[slice_3d] = f - if block_reorder: + if reorder: new_field = new_field.swapaxes(0, 2) return new_field diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 62b894541b8..2c829e4f1b0 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -183,6 +183,7 @@ class RAMSESDomainSubset(OctreeSubset): _domain_offset = 1 _block_reorder = "F" + _gradient_swap_axes = True _base_domain = None From 463cd9a0d57185508a1b2f8c92063a6413b9ee4d Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 17 Jun 2020 09:19:12 +0100 Subject: [PATCH 027/653] Use less ambiguous name ds usually refers to a dataset. --- yt/fields/fluid_fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index 89cdf112f53..9a060259ee2 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -218,7 +218,7 @@ def func(field, data): f = field_data[slice_3dr]/ds[slice_3d] f -= field_data[slice_3dl]/ds[slice_3d] new_field = np.zeros_like(data[grad_field], dtype=np.float64) - new_field = data.ds.arr(new_field, vr.units / ds.units) + new_field = data.ds.arr(new_field, vr.units / dt.units) new_field[slice_3d] = f if reorder: From 295695ebeb265ed74b141ab3e17872557f23d42e Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 16:28:34 +0000 Subject: [PATCH 028/653] Use block_reorder to decide when transposing --- yt/data_objects/octree_subset.py | 1 - yt/fields/fluid_fields.py | 6 +++--- yt/frontends/ramses/data_structures.py | 1 - 3 files changed, 3 insertions(+), 5 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index bf02807cf05..f98876c5e34 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -36,7 +36,6 @@ class OctreeSubset(YTSelectionContainer): _domain_offset = 0 _cell_count = -1 _block_reorder = None - _gradient_swap_axes = False # Set to True if one should swap the axes when computing gradient def __init__(self, base_region, domain, ds, over_refine_factor = 1, num_ghost_zones = 0): super(OctreeSubset, self).__init__(ds, None) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index 9a060259ee2..ce927240a2a 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -205,8 +205,8 @@ def grad_func(axi, ax): slice_3dl = slice_3d[:axi] + (sl_left,) + slice_3d[axi+1:] slice_3dr = slice_3d[:axi] + (sl_right,) + slice_3d[axi+1:] def func(field, data): - reorder = getattr(data, '_gradient_swap_axes', False) - if reorder: + block_reorder = getattr(data, '_block_reorder', None) + if block_reorder == 'F': field_data = data[grad_field].swapaxes(0, 2) else: field_data = data[grad_field] @@ -221,7 +221,7 @@ def func(field, data): new_field = data.ds.arr(new_field, vr.units / dt.units) new_field[slice_3d] = f - if reorder: + if block_reorder == 'F': new_field = new_field.swapaxes(0, 2) return new_field diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 2c829e4f1b0..62b894541b8 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -183,7 +183,6 @@ class RAMSESDomainSubset(OctreeSubset): _domain_offset = 1 _block_reorder = "F" - _gradient_swap_axes = True _base_domain = None From 9c04156e07b38a5397057ecb1afac4b43a4c1751 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 16:28:48 +0000 Subject: [PATCH 
029/653] ("index, "ones") should return data with units --- yt/fields/geometric_fields.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/fields/geometric_fields.py b/yt/fields/geometric_fields.py index 3b8fa041fa3..d6c73b3b3b7 100644 --- a/yt/fields/geometric_fields.py +++ b/yt/fields/geometric_fields.py @@ -94,9 +94,10 @@ def _zeros(field, data): def _ones(field, data): """Returns one for all cells""" arr = np.ones(data.ires.shape, dtype="float64") + tmp = data.apply_units(arr, field.units) if data._spatial: - return data._reshape_vals(arr) - return data.apply_units(arr, field.units) + return data._reshape_vals(tmp) + return tmp registry.add_field(("index", "ones"), sampling_type="cell", From 7c8fe2fd86e789d74451a5a1e10001d827b79e98 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 17 Jun 2020 09:19:42 +0100 Subject: [PATCH 030/653] Add unit test --- yt/frontends/ramses/tests/test_outputs.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 109ded6608c..7b55031ffaa 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -461,3 +461,17 @@ def test_magnetic_field_aliasing(): 'magnetic_field_divergence']: assert ('gas',field) in ds.derived_field_list ad[('gas',field)] + +output_00080 = "output_00080/info_00080.txt" +@requires_file(output_00080) +def test_field_accession(): + ds = yt.load(output_00080) + fields = [ + ('gas', 'density'), # basic ones + ('gas' ,'pressure'), + ('gas', 'pressure_gradient_magnitude'), # requires ghost zones + ] + # Check accessing gradient works for a variety of spatial domains + for reg in (ds.all_data(), ds.sphere([.1]*3, .01), ds.sphere([.5]*3, 0.05), ds.box([.1]*3, [.2]*3)): + for field in fields: + reg[field] From c42a2fb0bac87fb1795e607afe71b7e13153e6ca Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 16:52:20 +0000 Subject: [PATCH 031/653] Add new answer test --- yt/frontends/ramses/tests/test_outputs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 7b55031ffaa..1538344e694 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -17,7 +17,7 @@ import yt import numpy as np -_fields = ("temperature", "density", "velocity_magnitude") +_fields = ("temperature", "density", "velocity_magnitude", "pressure_gradient_magnitude") output_00080 = "output_00080/info_00080.txt" @requires_ds(output_00080) From 7a7807dd69a1ca4d9ddf19a64087ce7d4547c53c Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 17:15:47 +0000 Subject: [PATCH 032/653] Move non-specific code into oct_container.pyx --- setup.py | 5 - yt/frontends/ramses/data_structures.py | 2 +- yt/frontends/ramses/io_utils.pyx | 2 +- yt/geometry/oct_container.pxd | 2 + yt/geometry/oct_container.pyx | 159 ++++++++++++++++++++++++- yt/geometry/oct_visitors.pxd | 8 +- yt/geometry/oct_visitors.pyx | 29 +---- yt/geometry/ramses_oct_container.pxd | 15 --- yt/geometry/ramses_oct_container.pyx | 157 ------------------------ 9 files changed, 167 insertions(+), 212 deletions(-) delete mode 100644 yt/geometry/ramses_oct_container.pxd delete mode 100644 yt/geometry/ramses_oct_container.pyx diff --git a/setup.py b/setup.py index 9123917a441..0c3f8fea9d0 100644 --- a/setup.py +++ b/setup.py @@ -97,11 +97,6 @@ def _compile( 
"yt/utilities/lib/tsearch.c"], include_dirs=["yt/utilities/lib"], libraries=std_libs), - Extension("yt.geometry.ramses_oct_container", - ["yt/geometry/ramses_oct_container.pyx", - "yt/utilities/lib/tsearch.c"], - include_dirs=["yt/utilities/lib"], - libraries=std_libs), Extension("yt.geometry.oct_visitors", ["yt/geometry/oct_visitors.pyx"], include_dirs=["yt/utilities/lib/"], diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 62b894541b8..3bf55d9e344 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -29,7 +29,7 @@ from .particle_handlers import get_particle_handlers from .field_handlers import get_field_handlers from yt.utilities.cython_fortran_utils import FortranFile as fpu -from yt.geometry.ramses_oct_container import \ +from yt.geometry.oct_container import \ RAMSESOctreeContainer from yt.arraytypes import blankRecordArray diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index e666b68deb3..1975a744eee 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -2,7 +2,7 @@ cimport cython cimport numpy as np import numpy as np from yt.utilities.cython_fortran_utils cimport FortranFile -from yt.geometry.ramses_oct_container cimport RAMSESOctreeContainer +from yt.geometry.oct_container cimport RAMSESOctreeContainer from yt.utilities.exceptions import YTIllDefinedAMRData ctypedef np.int32_t INT32_t diff --git a/yt/geometry/oct_container.pxd b/yt/geometry/oct_container.pxd index eb500d7fd56..5b6ece4c1b5 100644 --- a/yt/geometry/oct_container.pxd +++ b/yt/geometry/oct_container.pxd @@ -88,6 +88,8 @@ cdef class SparseOctreeContainer(OctreeContainer): cdef void key_to_ipos(self, np.int64_t key, np.int64_t pos[3]) cdef np.int64_t ipos_to_key(self, int pos[3]) nogil +cdef class RAMSESOctreeContainer(OctreeContainer): + pass cdef extern from "tsearch.h" nogil: void *tsearch(const void *key, void **rootp, diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 63d80994bb6..3c5e450cd3f 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -10,10 +10,10 @@ Oct container cimport cython cimport numpy as np import numpy as np -from selection_routines cimport SelectorObject +from selection_routines cimport SelectorObject, AlwaysSelector from libc.math cimport floor, ceil -cimport selection_routines -from yt.geometry.oct_visitors cimport OctPadded +from yt.geometry.oct_visitors cimport OctPadded, NeighbourCellVisitor, StoreIndex, NeighbourVisitor + ORDER_MAX = 20 _ORDER_MAX = ORDER_MAX @@ -76,7 +76,7 @@ cdef class OctreeContainer: header['right_edge'], over_refine = header['over_refine'], partial_coverage = header['partial_coverage']) # NOTE: We do not allow domain/file indices to be specified. 
- cdef SelectorObject selector = selection_routines.AlwaysSelector(None) + cdef SelectorObject selector = AlwaysSelector(None) cdef oct_visitors.LoadOctree visitor visitor = oct_visitors.LoadOctree(obj, -1) cdef int i, j, k, n @@ -471,7 +471,7 @@ cdef class OctreeContainer: right_edge = (self.DRE[0], self.DRE[1], self.DRE[2]), over_refine = self.oref, partial_coverage = self.partial_coverage) - cdef SelectorObject selector = selection_routines.AlwaysSelector(None) + cdef SelectorObject selector = AlwaysSelector(None) # domain_id = -1 here, because we want *every* oct cdef oct_visitors.StoreOctree visitor visitor = oct_visitors.StoreOctree(self, -1) @@ -743,8 +743,155 @@ cdef class OctreeContainer: else: dest[i + offset] = source[file_inds[i], cell_inds[i]] + def fill_index(self, SelectorObject selector = AlwaysSelector(None)): + """Get the on-file index of each cell""" + cdef StoreIndex visitor + + cdef np.int64_t[:, :, :, :] cell_inds, + + cell_inds = np.full((self.nocts, 2, 2, 2), -1, dtype=np.int64) + + visitor = StoreIndex(self, -1) + visitor.cell_inds = cell_inds + + self.visit_all_octs(selector, visitor) + + return np.asarray(cell_inds) + + def neighbours_in_direction(self, int idim, int direction, + np.int64_t[:, :, :, :] cell_inds): + """Return index on file of all neighbours in a given direction""" + cdef SelectorObject always_selector = AlwaysSelector(None) + + # Store the index of the neighbour + cdef NeighbourVisitor n_visitor + cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.full_like(cell_inds, -1) + n_visitor = NeighbourVisitor(self, -1) + n_visitor.idim = idim + n_visitor.direction = direction + n_visitor.cell_inds = cell_inds + n_visitor.neigh_cell_inds = neigh_cell_inds + n_visitor.octree = self + n_visitor.last = -1 + self.visit_all_octs(always_selector, n_visitor) + + return np.asarray(neigh_cell_inds) + + @cython.boundscheck(False) + @cython.wraparound(False) + def copy_neighbour_data(self, + np.int64_t[:] icell, np.int64_t[:] nicell, + np.float64_t[:, :] input, np.float64_t[:, :] output, + int N): + """Copy data from neighbouring cell into current one""" + cdef int i + + for i in range(N): + if nicell[i] > -1 and icell[i] > -1: + output[icell[i], :] = input[nicell[i], :] + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def fill_level_with_domain( + self, int level, + np.uint8_t[:] levels, + np.uint8_t[:] cell_inds, + np.int64_t[:] file_inds, + np.int32_t[:] domains, + dict dest_fields, dict source_fields, + np.int32_t domain, + np.int64_t offset = 0 + ): + """Similar to fill_level but accepts a domain argument. + + This is particularly useful for frontends that have buffer zones at CPU boundaries. + These buffer oct cells have a different domain than the local one and + are usually not read, but one has to read them e.g. to compute ghost zones. 
+ """ + cdef np.ndarray[np.float64_t, ndim=2] source + cdef np.ndarray[np.float64_t, ndim=1] dest + cdef int i, count + + for key in dest_fields: + dest = dest_fields[key] + source = source_fields[key] + count = 0 + for i in range(levels.shape[0]): + if levels[i] != level or domains[i] != domain: continue + count += 1 + if file_inds[i] < 0: + dest[i + offset] = np.nan + else: + dest[i + offset] = source[file_inds[i], cell_inds[i]] + return count + + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def file_index_octs_with_ghost_zones( + self, SelectorObject selector, int domain_id, + int num_cells = -1): + """Similar as file_index_octs, but return as well the level, cell index, + file index and domain of the neighbouring cells. + + Arguments + --------- + selector : SelectorObject + The selector object. It is expected to select all cells for a selected oct. + domain_id : int + The domain to select. Set to -1 to select all domains. + num_cells : int, optional + The total number of cells (accounting for the ghost zones) + + Returns + ------- + shifted + + +---+---+---+---+ + | | | | | + |---+---+---+---| + | | x | x | | + |---+---+---+---| + | | x | x | | + |---+---+---+---| + | | | | | + +---+---+---+---+ + + """ + cdef np.int64_t i + cdef int num_octs + if num_cells < 0: + num_octs = selector.count_octs(self, domain_id) + num_cells = num_octs * 4**3 + cdef NeighbourCellVisitor visitor + + cdef np.ndarray[np.uint8_t, ndim=1] levels + cdef np.ndarray[np.uint8_t, ndim=1] cell_inds + cdef np.ndarray[np.int64_t, ndim=1] file_inds + cdef np.ndarray[np.int32_t, ndim=1] domains + levels = np.full(num_cells, 255, dtype="uint8") + file_inds = np.full(num_cells, -1, dtype="int64") + cell_inds = np.full(num_cells, 8, dtype="uint8") + domains = np.full(num_cells, -1, dtype="int32") + + visitor = NeighbourCellVisitor(self, -1) + # output: level, file_ind and cell_ind of the neighbouring cells + visitor.levels = levels + visitor.file_inds = file_inds + visitor.cell_inds = cell_inds + visitor.domains = domains + # direction to explore and extra parameters of the visitor + visitor.octree = self + visitor.last = -1 + + # Compute indices + self.visit_all_octs(selector, visitor) + + return levels, cell_inds, file_inds, domains + def finalize(self): - cdef SelectorObject selector = selection_routines.AlwaysSelector(None) + cdef SelectorObject selector = AlwaysSelector(None) cdef oct_visitors.AssignDomainInd visitor visitor = oct_visitors.AssignDomainInd(self, 1) self.visit_all_octs(selector, visitor) diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 84199f27230..7caa7fef47e 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -176,9 +176,9 @@ cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): cdef np.int32_t[:] neigh_domain cdef class NeighbourCellVisitor(BaseNeighbourVisitor): - cdef np.uint8_t[:] shifted_levels - cdef np.int64_t[:] shifted_file_inds - cdef np.uint8_t[:] shifted_cell_inds - cdef np.int32_t[:] neigh_domain + cdef np.uint8_t[:] levels + cdef np.int64_t[:] file_inds + cdef np.uint8_t[:] cell_inds + cdef np.int32_t[:] domains cdef void set_neighbour_info(self, Oct *o, int ishift[3]) \ No newline at end of file diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index ebb14f7cee7..6d47d4d363d 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -585,26 +585,9 @@ cdef class NeighbourCellVisitor(BaseNeighbourVisitor): neigh_file_ind = -1 neigh_cell_ind = 
8 - self.shifted_levels[self.index] = neigh_level - self.shifted_file_inds[self.index] = neigh_file_ind - self.shifted_cell_inds[self.index] = neigh_cell_ind - self.neigh_domain[self.index] = neigh_domain - - self.index += 1# Compute mapping from index in domain to index in domain with buffer zones -cdef class DomainMapper(BaseNeighbourVisitor): - # Intended to map all octs ids to only octs in local domain - # Should be used in conjunction with OctreeSubsetSelector - def __init__(self, OctreeContainer octree, int domain_id): - super(DomainMapper, self).__init__(octree, domain_id) - self.index_in = 0 - - #@cython.boundscheck(False) - #@cython.wraparound(False) - #@cython.initializedcheck(False) - cdef void visit(self, Oct* o, np.uint8_t selected): - if self.last != o.domain_ind: - self.last = o.domain_ind - if self.marked[self.index]: - self.mapping[self.index] = self.index_in - self.index_in += 1 - self.index += 1 + self.levels[self.index] = neigh_level + self.file_inds[self.index] = neigh_file_ind + self.cell_inds[self.index] = neigh_cell_ind + self.domains[self.index] = neigh_domain + + self.index += 1 \ No newline at end of file diff --git a/yt/geometry/ramses_oct_container.pxd b/yt/geometry/ramses_oct_container.pxd deleted file mode 100644 index 6d5dddd8997..00000000000 --- a/yt/geometry/ramses_oct_container.pxd +++ /dev/null @@ -1,15 +0,0 @@ -""" -RAMSES Oct definitions file - - - - -""" -from oct_container cimport SparseOctreeContainer, OctInfo -from .oct_visitors cimport OctVisitor, Oct, cind -from yt.utilities.lib.fp_utils cimport * -cimport numpy as np - -cdef class RAMSESOctreeContainer(SparseOctreeContainer): - cdef Oct neighbour_in_direction(self, OctInfo *oinfo, np.int64_t *nneighbors, - Oct *o, bint periodicity[3]) diff --git a/yt/geometry/ramses_oct_container.pyx b/yt/geometry/ramses_oct_container.pyx deleted file mode 100644 index 3d80bf4ffc5..00000000000 --- a/yt/geometry/ramses_oct_container.pyx +++ /dev/null @@ -1,157 +0,0 @@ -cimport cython -from oct_visitors cimport StoreIndex, NeighbourVisitor, NeighbourCellVisitor -from selection_routines cimport SelectorObject, AlwaysSelector, OctreeSubsetSelector -cimport numpy as np -import numpy as np - - -cdef class RAMSESOctreeContainer(SparseOctreeContainer): - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - cdef Oct neighbour_in_direction(self, OctInfo *oi, np.int64_t *nneighbours, Oct *o, - bint periodicity[3]): - pass - - def fill_index(self, SelectorObject selector = AlwaysSelector(None)): - # Get the on-file index of each cell - cdef StoreIndex visitor - - cdef np.int64_t[:, :, :, :] cell_inds, - - cell_inds = np.full((self.nocts, 2, 2, 2), -1, dtype=np.int64) - - visitor = StoreIndex(self, -1) - visitor.cell_inds = cell_inds - - self.visit_all_octs(selector, visitor) - - return np.asarray(cell_inds) - - def neighbours_in_direction(self, int idim, int direction, - np.int64_t[:, :, :, :] cell_inds): - """Return index on file of all neighbours in a given direction""" - cdef SelectorObject always_selector = AlwaysSelector(None) - - # Store the index of the neighbour - cdef NeighbourVisitor n_visitor - cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.full_like(cell_inds, -1) - n_visitor = NeighbourVisitor(self, -1) - n_visitor.idim = idim - n_visitor.direction = direction - n_visitor.cell_inds = cell_inds - n_visitor.neigh_cell_inds = neigh_cell_inds - n_visitor.octree = self - n_visitor.last = -1 - self.visit_all_octs(always_selector, n_visitor) - - return 
np.asarray(neigh_cell_inds) - - #@cython.boundscheck(False) - @cython.wraparound(False) - def copy_neighbour_data(self, - np.int64_t[:] icell, np.int64_t[:] nicell, - np.float64_t[:, :] input, np.float64_t[:, :] output, - int N,): - cdef int i - - for i in range(N): - if nicell[i] > -1 and icell[i] > -1: - output[icell[i], :] = input[nicell[i], :] - - def file_index_octs(self, SelectorObject selector, int domain_id, - num_cells = -1, spatial_offset=(0, 0, 0)): - - - cdef int i, idim, direction - cdef bint do_spatial_offset - cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds - cdef np.ndarray source_shifted - - do_spatial_offset = False - for i in range(3): - if spatial_offset[i] == 1 or spatial_offset[i] == -1: - idim = i - direction = spatial_offset[i] - if do_spatial_offset: - raise Exception( - 'ERROR: You can only specify one spatial offset direction, got [%s, %s, %s]!' % - (spatial_offset[0], spatial_offset[1], spatial_offset[2])) - do_spatial_offset = True - - if not do_spatial_offset: - return super(RAMSESOctreeContainer, self).file_index_octs( - selector, domain_id, num_cells) - else: - return self.file_index_octs_with_shift( - selector, domain_id, idim, direction, num_cells) - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - def fill_level_with_domain( - self, int level, - np.uint8_t[:] levels, - np.uint8_t[:] cell_inds, - np.int64_t[:] file_inds, - np.int32_t[:] domains, - dict dest_fields, dict source_fields, - np.int32_t domain, - np.int64_t offset = 0 - ): - cdef np.ndarray[np.float64_t, ndim=2] source - cdef np.ndarray[np.float64_t, ndim=1] dest - cdef np.float64_t tmp - cdef int i, count - cdef str key - for key in dest_fields: - dest = dest_fields[key] - source = source_fields[key] - count = 0 - for i in range(levels.shape[0]): - if levels[i] != level or domains[i] != domain: continue - count += 1 - if file_inds[i] < 0: - dest[i + offset] = np.nan - else: - # print(f'\t{i}: Accessing source {file_inds[i]}:{cell_inds[i]} source.shape=({source.shape[0]},{source.shape[1]})') - tmp =source[file_inds[i], cell_inds[i]] - dest[i + offset] = tmp # source[file_inds[i], cell_inds[i]] - return count - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.cdivision(True) - def file_index_octs_with_ghost_zones( - self, SelectorObject selector, int domain_id, - int num_cells = -1): - cdef np.int64_t i - cdef int num_octs - if num_cells < 0: - num_octs = selector.count_octs(self, domain_id) - num_cells = num_octs * 4**3 - cdef NeighbourCellVisitor visitor - - cdef np.ndarray[np.uint8_t, ndim=1] shifted_levels - cdef np.ndarray[np.uint8_t, ndim=1] shifted_cell_inds - cdef np.ndarray[np.int64_t, ndim=1] shifted_file_inds - cdef np.ndarray[np.int32_t, ndim=1] neigh_domain - shifted_levels = np.full(num_cells, 255, dtype="uint8") - shifted_file_inds = np.full(num_cells, -1, dtype="int64") - shifted_cell_inds = np.full(num_cells, 8, dtype="uint8") - neigh_domain = np.full(num_cells, -1, dtype="int32") - - visitor = NeighbourCellVisitor(self, -1) - # output: level, file_ind and cell_ind of the neighbouring cells - visitor.shifted_levels = shifted_levels - visitor.shifted_file_inds = shifted_file_inds - visitor.shifted_cell_inds = shifted_cell_inds - visitor.neigh_domain = neigh_domain - # direction to explore and extra parameters of the visitor - visitor.octree = self - visitor.last = -1 - - # Compute indices - self.visit_all_octs(selector, visitor) - - return shifted_levels, shifted_cell_inds, shifted_file_inds, neigh_domain \ No newline at 
end of file From 4fa0a57a657f5d390ccb5e76214a1c0f6e3b529b Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 17:21:49 +0000 Subject: [PATCH 033/653] Remove unused parent accessor --- yt/geometry/oct_container.pyx | 4 ---- yt/geometry/oct_visitors.pxd | 1 - 2 files changed, 5 deletions(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 3c5e450cd3f..bdf9eab8798 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -566,7 +566,6 @@ cdef class OctreeContainer: cdef int ind[3] cdef int nb = 0 cdef Oct *cur - cdef Oct *parent cdef np.float64_t pp[3] cdef np.float64_t cp[3] cdef np.float64_t dds[3] @@ -575,7 +574,6 @@ cdef class OctreeContainer: cdef OctAllocationContainer *cont = self.domains.get_cont(curdom - 1) cdef int initial = cont.n_assigned cdef int in_boundary = 0 - parent = NULL # How do we bootstrap ourselves? for p in range(no): #for every oct we're trying to add find the @@ -608,12 +606,10 @@ cdef class OctreeContainer: ind[i] = 1 cp[i] += dds[i]/2.0 # Check if it has not been allocated - parent = cur cur = self.next_child(curdom, ind, cur) # Now we should be at the right level cur.domain = curdom cur.file_ind = p - cur.parent = parent return cont.n_assigned - initial + nb def allocate_domains(self, domain_counts): diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 7caa7fef47e..177412b6132 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -16,7 +16,6 @@ cdef struct Oct: np.int64_t domain_ind # index within the global set of domains np.int64_t domain # (opt) addl int index Oct **children # Up to 8 long - Oct *parent cdef struct OctInfo: np.float64_t left_edge[3] From 9b4e5c7aadbbe70eac2abcd05e1f87b2577222d6 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 17:22:00 +0000 Subject: [PATCH 034/653] RAMSES octree is sparse, restore this --- yt/geometry/oct_container.pxd | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/geometry/oct_container.pxd b/yt/geometry/oct_container.pxd index 5b6ece4c1b5..75ada2b97e7 100644 --- a/yt/geometry/oct_container.pxd +++ b/yt/geometry/oct_container.pxd @@ -88,7 +88,7 @@ cdef class SparseOctreeContainer(OctreeContainer): cdef void key_to_ipos(self, np.int64_t key, np.int64_t pos[3]) cdef np.int64_t ipos_to_key(self, int pos[3]) nogil -cdef class RAMSESOctreeContainer(OctreeContainer): +cdef class RAMSESOctreeContainer(SparseOctreeContainer): pass cdef extern from "tsearch.h" nogil: From b5b2bfa134dfee2c4c5a4be4aee25f899094ab09 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jan 2020 17:48:50 +0000 Subject: [PATCH 035/653] Update docstring --- yt/geometry/oct_container.pyx | 27 +++++++++++++++++++++++---- 1 file changed, 23 insertions(+), 4 deletions(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index bdf9eab8798..2592573ab43 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -800,7 +800,7 @@ cdef class OctreeContainer: np.int64_t offset = 0 ): """Similar to fill_level but accepts a domain argument. - + This is particularly useful for frontends that have buffer zones at CPU boundaries. These buffer oct cells have a different domain than the local one and are usually not read, but one has to read them e.g. to compute ghost zones. 
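For orientation, a hedged sketch of how the two methods documented in this commit fit together (an editor's illustration, not code from the patch; ``oct_handler``, ``selector``, ``domain_id``, ``max_level`` and the field dictionaries are assumed to be set up as in ``fill_hydro``):

    >>> levels, cell_inds, file_inds, domains = \
    ...     oct_handler.file_index_octs_with_ghost_zones(selector, domain_id)
    >>> for dom in np.unique(domains[domains > 0]):
    ...     for lev in range(max_level + 1):
    ...         oct_handler.fill_level_with_domain(
    ...             lev, levels, cell_inds, file_inds, domains,
    ...             dest_fields, source_fields, domain=dom)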
@@ -828,7 +828,7 @@ cdef class OctreeContainer: def file_index_octs_with_ghost_zones( self, SelectorObject selector, int domain_id, int num_cells = -1): - """Similar as file_index_octs, but return as well the level, cell index, + """Similar as file_index_octs, but return as well the level, cell index, file index and domain of the neighbouring cells. Arguments @@ -838,11 +838,30 @@ cdef class OctreeContainer: domain_id : int The domain to select. Set to -1 to select all domains. num_cells : int, optional - The total number of cells (accounting for the ghost zones) + The total number of cells (accounting for a 1-cell thick ghost zone layer). Returns ------- - shifted + levels : uint8, shape (num_cells,) + The level of each cell of the super oct + cell_inds : uint8, shape (num_cells, ) + The index of each cell of the super oct within its own oct + file_inds : int64, shape (num_cells, ) + The on-file position of the cell. See notes below. + domains : int32, shape (num_cells) + The domain to which the cells belongs. See notes below. + + Notes + ----- + + The algorithm constructs a "super-oct" around each oct (see sketch below, + where the original oct cells are marked with an x). + + Note that for sparse octrees (such as RAMSES'), the neighbouring cells + may belong to another domain (this is stored in `domains`). If the dataset + provides buffer zones between domains (such as RAMSES), this may be stored + locally and can be accessed directly. + +---+---+---+---+ | | | | | From c3262c74a8ca7a2cacb77a4a85035bcc2f303d88 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 8 Feb 2020 14:14:02 +0000 Subject: [PATCH 036/653] Only query fcoords w/o ghost zones --- yt/frontends/ramses/data_structures.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 3bf55d9e344..1473d5e1a22 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -261,10 +261,9 @@ def fwidth(self): @property def fcoords(self): - fcoords = super(RAMSESDomainSubset, self).fcoords num_ghost_zones = self._num_ghost_zones if num_ghost_zones == 0: - return fcoords + return super(RAMSESDomainSubset, self).fcoords fcoords_base = self._base_domain.fcoords oct_selector = OctreeSubsetSelector(self) From 5e5af6fb700fa379ee62316b61f1b08e2a71e358 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Feb 2020 10:57:04 +0000 Subject: [PATCH 037/653] Assume periodicity --- yt/geometry/oct_visitors.pyx | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 6d47d4d363d..454b18c7ed6 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -372,6 +372,12 @@ cdef class BaseNeighbourVisitor(OctVisitor): else: fcoords[i] = (c + 0.5) * dx / self.octree.nn[i] + # Assuming periodicity + if fcoords[i] < 0: + fcoords[i] += 1 + elif fcoords[i] > 1: + fcoords[i] -= 1 + # Use octree to find neighbour neighbour = self.octree.get(fcoords, &self.oi, max_level=self.level) @@ -507,6 +513,11 @@ cdef class NeighbourCellVisitor(BaseNeighbourVisitor): for i in range(3): c = (self.pos[i] << self.oref) fcoords[i] = (c + 0.5 + ishift[i]) * dx / self.octree.nn[i] + # Assuming periodicity + if fcoords[i] < 0: + fcoords[i] += 1 + elif fcoords[i] > 1: + fcoords[i] -= 1 local_oct &= (0 <= ishift[i] <= 1) other_oct = not local_oct From 7266517957cb9aa0c75ec8f263a541b7759e7e5a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: 
Tue, 11 Feb 2020 16:47:04 +0000 Subject: [PATCH 038/653] Add warning --- yt/frontends/ramses/data_structures.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 1473d5e1a22..4a65d43acfe 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -2,6 +2,7 @@ import numpy as np import stat import weakref +import warnings from collections import defaultdict from glob import glob @@ -193,6 +194,8 @@ def __init__(self, base_region, domain, ds, over_refine_factor=1, num_ghost_zone self._base_grid = base_grid if num_ghost_zones > 0: + if not all(ds.periodicity): + warnings.warn('Ghost zones will wrongly assume the domain to be periodic.') # Create a base domain *with no self._base_domain.fwidth base_domain = RAMSESDomainSubset(ds.all_data(), domain, ds, over_refine_factor) self._base_domain = base_domain From 9fc52dfd595864f1bfb0f469f08c92bbc060db51 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 12 Feb 2020 16:16:09 +0000 Subject: [PATCH 039/653] Vastly simplify code --- yt/frontends/ramses/data_structures.py | 33 ++--- yt/geometry/oct_container.pyx | 64 ++++++--- yt/geometry/oct_visitors.pxd | 10 +- yt/geometry/oct_visitors.pyx | 187 ++++++------------------- 4 files changed, 102 insertions(+), 192 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 4a65d43acfe..e14918c8d1e 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -268,36 +268,21 @@ def fcoords(self): if num_ghost_zones == 0: return super(RAMSESDomainSubset, self).fcoords - fcoords_base = self._base_domain.fcoords - oct_selector = OctreeSubsetSelector(self) oh = self.oct_handler - n_oct = fcoords_base.size // 3 // 8 - new_fcoords = np.full((n_oct, 4, 4, 4, 3), np.nan) + indices = oh.fill_index(self.selector).reshape(-1, 8) + oinds, cinds = oh.fill_octcellindex_neighbours(self.selector) - icell = oh.fill_index(oct_selector) - Ncell = icell.size + oinds = oinds.reshape(-1, 64) + cinds = cinds.reshape(-1, 64) - for idim in range(3): - for idir in (-1, 1): - ishift_all = [1, 1, 1] - ishift_all[idim] += idir + inds = indices[oinds, cinds] - nicell = oh.neighbours_in_direction(idim, idir, icell).reshape(-1) - - tmp = np.full((n_oct * 2 * 2 * 2, 3), np.nan) - - oh.copy_neighbour_data( - icell.reshape(-1), nicell, - fcoords_base, tmp, Ncell) - - _slice = tuple([slice(None)] + - [slice(i, i+2) for i in ishift_all] + - [slice(None)]) - new_fcoords[_slice] = tmp.reshape(n_oct, 2, 2, 2, -1) - new_fcoords = self.ds.arr(new_fcoords, fcoords_base.units) - return new_fcoords + fcoords = self.ds.arr( + oh.fcoords(self.selector)[inds].reshape(-1, 3), + 'unitary') + return fcoords def fill(self, fd, fields, selector, file_handler): if self._num_ghost_zones == 0: diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 2592573ab43..ae7421f73f4 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -12,7 +12,7 @@ cimport numpy as np import numpy as np from selection_routines cimport SelectorObject, AlwaysSelector from libc.math cimport floor, ceil -from yt.geometry.oct_visitors cimport OctPadded, NeighbourCellVisitor, StoreIndex, NeighbourVisitor +from yt.geometry.oct_visitors cimport OctPadded, NeighbourCellVisitor, StoreIndex, NeighbourCellIndexVisitor ORDER_MAX = 20 @@ -743,7 +743,7 @@ cdef class OctreeContainer: """Get the on-file index of each cell""" 
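# Editor's note (a sketch, not part of the patch): in the new RAMSESDomainSubset.fcoords
# above, the two index tables are combined by plain NumPy fancy indexing:
#   indices : (n_octs, 8)  on-file index of each cell of a regular oct (fill_index)
#   oinds   : (n_octs, 64) oct owning each cell of the 4x4x4 "super oct"
#   cinds   : (n_octs, 64) that cell's position within its owning oct
#   inds = indices[oinds, cinds]  then gives, per oct, the 64 on-file indices used to
#   look up the cell centres returned by oct_handler.fcoords(selector).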
cdef StoreIndex visitor - cdef np.int64_t[:, :, :, :] cell_inds, + cdef np.int64_t[:, :, :, :] cell_inds cell_inds = np.full((self.nocts, 2, 2, 2), -1, dtype=np.int64) @@ -754,24 +754,48 @@ cdef class OctreeContainer: return np.asarray(cell_inds) - def neighbours_in_direction(self, int idim, int direction, - np.int64_t[:, :, :, :] cell_inds): - """Return index on file of all neighbours in a given direction""" - cdef SelectorObject always_selector = AlwaysSelector(None) - - # Store the index of the neighbour - cdef NeighbourVisitor n_visitor - cdef np.ndarray[np.int64_t, ndim=4] neigh_cell_inds = np.full_like(cell_inds, -1) - n_visitor = NeighbourVisitor(self, -1) - n_visitor.idim = idim - n_visitor.direction = direction - n_visitor.cell_inds = cell_inds - n_visitor.neigh_cell_inds = neigh_cell_inds - n_visitor.octree = self - n_visitor.last = -1 - self.visit_all_octs(always_selector, n_visitor) - - return np.asarray(neigh_cell_inds) + def fill_octcellindex_neighbours(self, SelectorObject selector, int num_octs = -1, domain_id = -1): + """Compute the oct and cell indices of all the cells within all selected octs, extended + by one cell in all directions (for ghost zones computations). + + Parameters + ---------- + selector : SelectorObject + Selector for the octs to compute neighbour of + num_octs : int, optional + The number of octs to read in + domain_id : int, optional + The domain to perform the selection over + + Returns + ------- + oct_inds : int64 ndarray (nocts*8, ) + The on-domain index of the octs containing each cell + cell_inds : uint8 array (nocts*8, ) + The index of the cell in its parent oct + + Note + ---- + oct_inds/cell_inds + """ + if num_octs == -1: + num_octs = selector.count_octs(self, domain_id) + + cdef NeighbourCellIndexVisitor visitor + + cdef np.uint8_t[:] cell_inds + cdef np.int64_t[:] oct_inds + + cell_inds = np.full(num_octs*4**3, 8, dtype=np.uint8) + oct_inds = np.full(num_octs*4**3, -1, dtype=np.int64) + + visitor = NeighbourCellIndexVisitor(self, -1) + visitor.cell_inds = cell_inds + visitor.domain_inds = oct_inds + + self.visit_all_octs(selector, visitor) + + return np.asarray(oct_inds), np.asarray(cell_inds) @cython.boundscheck(False) @cython.wraparound(False) diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 177412b6132..791f2c82fe1 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -157,16 +157,15 @@ cdef class BaseNeighbourVisitor(OctVisitor): cdef OctreeContainer octree cdef OctInfo oi - cdef void set_neighbour_oct(self, Oct* o) - cdef void get_neighbour_cell_index(self, Oct* o, np.uint8_t selected) + cdef void set_neighbour_info(self, Oct *o, int ishift[3]) cdef inline np.uint8_t neighbour_rind(self): cdef int d = (1 << self.oref) return (((self.neigh_ind[2]*d)+self.neigh_ind[1])*d+self.neigh_ind[0]) -cdef class NeighbourVisitor(BaseNeighbourVisitor): - cdef np.int64_t[:,:,:,:] cell_inds - cdef np.int64_t[:,:,:,:] neigh_cell_inds +cdef class NeighbourCellIndexVisitor(BaseNeighbourVisitor): + cdef np.uint8_t[:] cell_inds + cdef np.int64_t[:] domain_inds cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): cdef np.uint8_t[:] shifted_levels @@ -180,4 +179,3 @@ cdef class NeighbourCellVisitor(BaseNeighbourVisitor): cdef np.uint8_t[:] cell_inds cdef np.int32_t[:] domains - cdef void set_neighbour_info(self, Oct *o, int ishift[3]) \ No newline at end of file diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 454b18c7ed6..e35c8c14c8d 100644 --- 
a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -354,147 +354,6 @@ cdef class BaseNeighbourVisitor(OctVisitor): self.neigh_ind = np.zeros(3, np.int8) super(BaseNeighbourVisitor, self).__init__(octree, domain_id) - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - cdef void set_neighbour_oct(self, Oct *o): - cdef int i - cdef np.float64_t c, dx - cdef np.int64_t ipos - cdef np.float64_t fcoords[3] - cdef Oct *neighbour - dx = 1.0 / ((1 << self.oref) << self.level) - # Compute position of neighbouring cell - for i in range(3): - c = ((self.pos[i] << self.oref) + self.ind[i]) - if i == self.idim: - fcoords[i] = (c + 0.5 + 2*self.direction) * dx / self.octree.nn[i] - else: - fcoords[i] = (c + 0.5) * dx / self.octree.nn[i] - - # Assuming periodicity - if fcoords[i] < 0: - fcoords[i] += 1 - elif fcoords[i] > 1: - fcoords[i] -= 1 - - # Use octree to find neighbour - neighbour = self.octree.get(fcoords, &self.oi, max_level=self.level) - - # Extra step - compute cell position in neighbouring oct (and store in oi.ipos) - if self.oi.level == self.level - 1: - for i in range(3): - ipos = (((self.pos[i] << self.oref) + self.ind[i])) >> 1 - if i == self.idim: - ipos += self.direction - if (self.oi.ipos[i] << 1) == ipos: - self.oi.ipos[i] = 0 - else: - self.oi.ipos[i] = 1 - self.neighbour = neighbour - - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - @cython.cdivision(True) - cdef void get_neighbour_cell_index(self, Oct* o, np.uint8_t selected): - cdef int i - cdef bint other_oct # True if the neighbouring cell lies in another oct - cdef np.int64_t cell_ind - - # Compute information about neighbour once per oct - if self.last != o.domain_ind: - self.set_neighbour_oct(o) - self.last = o.domain_ind - - # Note that we provide an index even if the cell is not selected. - # if selected == 0: return -1 - # Index of neighbouring cell within its oct - for i in range(3): - if i == self.idim: - self.neigh_ind[i] = (self.ind[i] + self.direction) - other_oct = self.neigh_ind[i] < 0 or self.neigh_ind[i] > 1 - if other_oct: - # trick here: we want modulo with positive remainder, but neigh_ind may be negative so cast - # it to unsigned int *before* applying modulo. - self.neigh_ind[i] = (self.neigh_ind[i]) % 2 - else: - self.neigh_ind[i] = self.ind[i] - - self.other_oct = other_oct - if other_oct: - if self.neighbour != NULL: - if self.oi.level == self.level - 1: - # Position within neighbouring oct is stored in oi.ipos - for i in range(3): - self.neigh_ind[i] = self.oi.ipos[i] - elif self.oi.level != self.level: - print('This should not happen! 
%s %s' % (self.oi.level, self.level)) - self.neighbour = NULL - -# Store neighbouring cell index in current cell -cdef class NeighbourVisitor(BaseNeighbourVisitor): - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - cdef void visit(self, Oct* o, np.uint8_t selected): - cdef np.int64_t cell_ind - cdef Oct *neighbour_oct - cdef bint ok - - self.get_neighbour_cell_index(o, selected) - if not self.other_oct: - neighbour_oct = o - ok = True - elif self.neighbour != NULL: - neighbour_oct = self.neighbour - ok = True - else: - ok = False - - if ok: - cell_ind = self.cell_inds[neighbour_oct.domain_ind, self.neigh_ind[2], self.neigh_ind[1], self.neigh_ind[0]] - else: - cell_ind = -1 - - self.neigh_cell_inds[o.domain_ind, self.ind[2], self.ind[1], self.ind[0]] = cell_ind - -# Store file position + cell of neighbouring cell in current cell -cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): - @cython.boundscheck(False) - @cython.wraparound(False) - @cython.initializedcheck(False) - cdef void visit(self, Oct* o, np.uint8_t selected): - cdef np.int64_t neigh_file_ind - cdef np.uint8_t neigh_cell_ind - cdef np.int32_t neigh_domain - - if selected == 0: return - # Note: only selected items have an index - self.get_neighbour_cell_index(o, selected) - if not self.other_oct: - neigh_domain = o.domain - neigh_file_ind = o.file_ind - neigh_cell_ind = self.neighbour_rind() - elif self.neighbour != NULL: - neigh_domain = self.neighbour.domain - neigh_file_ind = self.neighbour.file_ind - neigh_cell_ind = self.neighbour_rind() - else: - neigh_domain = -1 - neigh_file_ind = -1 - neigh_cell_ind = 8 - - self.shifted_levels[self.index] = self.level - # Note: we store the local level, not the remote one - self.shifted_file_inds[self.index] = neigh_file_ind - self.shifted_cell_inds[self.index] = neigh_cell_ind - self.neigh_domain[self.index] = neigh_domain - - self.index += 1 - -# Store file position + cell of neighbouring cell in current cell -cdef class NeighbourCellVisitor(BaseNeighbourVisitor): @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @@ -555,6 +414,49 @@ cdef class NeighbourCellVisitor(BaseNeighbourVisitor): print('This should not happen! 
%s %s' % (self.oi.level, self.level)) self.neighbour = NULL +# Store neighbouring cell index in current cell +cdef class NeighbourCellIndexVisitor(BaseNeighbourVisitor): + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.initializedcheck(False) + cdef void visit(self, Oct* o, np.uint8_t selected): + cdef int i, j, k + cdef int ishift[3] + cdef np.uint8_t neigh_cell_ind + cdef np.int64_t neigh_domain_ind + if selected == 0: return + # Work at oct level + if self.last == o.domain_ind: return + + self.last = o.domain_ind + + # Loop over cells in and directly around oct + for i in range(-1, 3): + ishift[0] = i + for j in range(-1, 3): + ishift[1] = j + for k in range(-1, 3): + ishift[2] = k + self.set_neighbour_info(o, ishift) + + if not self.other_oct: + neigh_domain_ind = o.domain_ind + neigh_cell_ind = self.neighbour_rind() + elif self.neighbour != NULL: + neigh_domain_ind = self.neighbour.domain_ind + neigh_cell_ind = self.neighbour_rind() + else: + neigh_domain_ind = -1 + neigh_cell_ind = 8 + + self.cell_inds[self.index] = neigh_cell_ind + self.domain_inds[self.index] = neigh_domain_ind + + self.index += 1 + +# Store file position + cell of neighbouring cell in current cell +cdef class NeighbourCellVisitor(BaseNeighbourVisitor): + @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @@ -601,4 +503,5 @@ cdef class NeighbourCellVisitor(BaseNeighbourVisitor): self.cell_inds[self.index] = neigh_cell_ind self.domains[self.index] = neigh_domain - self.index += 1 \ No newline at end of file + self.index += 1 + From 8ec03540d3db06f8cb094e98a980108126ab7a0a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 12 Feb 2020 16:16:21 +0000 Subject: [PATCH 040/653] Remove useless import --- yt/frontends/ramses/data_structures.py | 1 - 1 file changed, 1 deletion(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index e14918c8d1e..b71ccef109f 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -11,7 +11,6 @@ setdefaultattr from yt.geometry.oct_geometry_handler import \ OctreeIndex -from yt.geometry.selection_routines import OctreeSubsetSelector from yt.geometry.geometry_handler import \ YTDataChunk From aa2605dfc406f3f6da4d083d55fa45ec2935c27d Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 12 Feb 2020 16:25:22 +0000 Subject: [PATCH 041/653] Remove unused visitor --- yt/geometry/oct_visitors.pxd | 6 ------ 1 file changed, 6 deletions(-) diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 791f2c82fe1..7a77a653299 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -167,12 +167,6 @@ cdef class NeighbourCellIndexVisitor(BaseNeighbourVisitor): cdef np.uint8_t[:] cell_inds cdef np.int64_t[:] domain_inds -cdef class FillFileIndicesRNeighbour(BaseNeighbourVisitor): - cdef np.uint8_t[:] shifted_levels - cdef np.int64_t[:] shifted_file_inds - cdef np.uint8_t[:] shifted_cell_inds - cdef np.int32_t[:] neigh_domain - cdef class NeighbourCellVisitor(BaseNeighbourVisitor): cdef np.uint8_t[:] levels cdef np.int64_t[:] file_inds From 11285a006ae8ce726ef49ff365681af832c19e7a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 19 Feb 2020 10:54:22 +0000 Subject: [PATCH 042/653] Remove useless method --- yt/geometry/oct_container.pyx | 13 ------------- 1 file changed, 13 deletions(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index ae7421f73f4..36d4b8dfb51 100644 --- 
a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -797,19 +797,6 @@ cdef class OctreeContainer: return np.asarray(oct_inds), np.asarray(cell_inds) - @cython.boundscheck(False) - @cython.wraparound(False) - def copy_neighbour_data(self, - np.int64_t[:] icell, np.int64_t[:] nicell, - np.float64_t[:, :] input, np.float64_t[:, :] output, - int N): - """Copy data from neighbouring cell into current one""" - cdef int i - - for i in range(N): - if nicell[i] > -1 and icell[i] > -1: - output[icell[i], :] = input[nicell[i], :] - @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) From 6fde2e5823ba9d76065667c7c764a0bf45564403 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 29 May 2020 09:42:01 +0100 Subject: [PATCH 043/653] Address comments --- yt/frontends/ramses/data_structures.py | 7 ++++--- yt/frontends/ramses/io_utils.pyx | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index b71ccef109f..5dbcf6e0319 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -254,7 +254,7 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo def fwidth(self): fwidth = super(RAMSESDomainSubset, self).fwidth if self._num_ghost_zones > 0: - fwidth = super(RAMSESDomainSubset, self).fwidth.reshape(-1, 8, 3) + fwidth = fwidth.reshape(-1, 8, 3) n_oct = fwidth.shape[0] new_fwidth = np.zeros((n_oct, self.nz**3, 3), dtype=fwidth.dtype) new_fwidth[:, :, :] = fwidth[:, 0:1, :] @@ -344,14 +344,15 @@ def _detect_output_fields(self): self.field_list = self.particle_field_list + self.fluid_field_list def _identify_base_chunk(self, dobj): - ngz = dobj._num_ghost_zones if getattr(dobj, "_chunk_info", None) is None: domains = [dom for dom in self.domains if dom.included(dobj.selector)] base_region = getattr(dobj, "base_region", dobj) if len(domains) > 1: mylog.debug("Identified %s intersecting domains", len(domains)) - subsets = [RAMSESDomainSubset(base_region, domain, self.dataset, num_ghost_zones=ngz) + subsets = [RAMSESDomainSubset( + base_region, domain, self.dataset, + num_ghost_zones=dobj._num_ghost_zones) for domain in domains] dobj._chunk_info = subsets dobj._current_chunk = list(self._chunk_all(dobj))[0] diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 1975a744eee..d745655cbfe 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -75,7 +75,7 @@ def read_amr(FortranFile f, dict headers, if n > 0: max_level = max(ilevel - min_level, max_level) if n != ng: - raise Exception('Expected %s octs, got %s' % (ng, n)) + raise ValueError('Expected %s octs, got %s' % (ng, n)) return max_level From b91838990279b03466f5c0caa5eab576eaae256e Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 17 Jun 2020 14:23:38 +0100 Subject: [PATCH 044/653] Fix mistake --- yt/fields/fluid_fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index ce927240a2a..f9abc0785d7 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -218,7 +218,7 @@ def func(field, data): f = field_data[slice_3dr]/ds[slice_3d] f -= field_data[slice_3dl]/ds[slice_3d] new_field = np.zeros_like(data[grad_field], dtype=np.float64) - new_field = data.ds.arr(new_field, vr.units / dt.units) + new_field = data.ds.arr(new_field, field_data.units / ds.units) new_field[slice_3d] = f if 
block_reorder == 'F': From 7978ddcaeb3844b0cddc0cf94e990275fd479a71 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 17 Jun 2020 14:23:50 +0100 Subject: [PATCH 045/653] No need to format warning --- yt/fields/fluid_fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index f9abc0785d7..25134b65f40 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -189,7 +189,7 @@ def setup_gradient_fields(registry, grad_field, field_units, slice_info = None): geom = registry.ds.geometry if is_curvilinear(geom): - mylog.warning("In %s geometry, gradient fields may contain artifacts near cartesian axes." % geom) + mylog.warning("In %s geometry, gradient fields may contain artifacts near cartesian axes.", geom) assert(isinstance(grad_field, tuple)) ftype, fname = grad_field From 86a314e51d077146068ee9f6d34f9f0b1be1ee43 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 23 Jun 2020 10:01:30 +0100 Subject: [PATCH 046/653] regenerate answers --- tests/tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index 72f35e51cea..8df95daf9ec 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -105,7 +105,7 @@ answer_tests: - yt/frontends/boxlib/tests/test_outputs.py:test_NyxDataset - yt/frontends/boxlib/tests/test_outputs.py:test_WarpXDataset - local_ramses_002: + local_ramses_003: - yt/frontends/ramses/tests/test_outputs.py local_ytdata_006: From 6d4adf8f9d6fea44c8dfb224247c20d7dc113ebe Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 23 Jun 2020 15:12:20 +0100 Subject: [PATCH 047/653] Support non periodic boundaries? --- yt/frontends/ramses/io_utils.pyx | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index d745655cbfe..edc17da082e 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -74,8 +74,6 @@ def read_amr(FortranFile f, dict headers, count_boundary = 1) if n > 0: max_level = max(ilevel - min_level, max_level) - if n != ng: - raise ValueError('Expected %s octs, got %s' % (ng, n)) return max_level @@ -103,8 +101,8 @@ cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t n skip_len = twotondim * nvar # It goes: level, CPU, 8-variable (1 oct) - offset = np.full((ncpu, n_levels), -1, dtype=np.int64) - level_count = np.zeros((ncpu, n_levels), dtype=np.int64) + offset = np.full((ncpu_and_bound, n_levels), -1, dtype=np.int64) + level_count = np.zeros((ncpu_and_bound, n_levels), dtype=np.int64) cdef np.int64_t[:,:] level_count_view = level_count cdef np.int64_t[:,:] offset_view = offset From 75f19fe5861cf4c278c1ef647c4b4ad6cbceb44c Mon Sep 17 00:00:00 2001 From: Madicken Munk Date: Thu, 25 Jun 2020 14:20:38 -0500 Subject: [PATCH 048/653] deprecate simulation answer testing framework --- yt/utilities/answer_testing/framework.py | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 2397077877c..369d856649d 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -21,7 +21,8 @@ from matplotlib.testing.compare import compare_images from nose.plugins import Plugin from yt.funcs import \ - get_pbar + get_pbar, \ + issue_deprecation_warning from yt.testing import \ assert_equal, \ assert_allclose_units, \ @@ -227,7 +228,7 @@ class 
AnswerTestLocalStorage(AnswerTestStorage): def dump(self, result_storage): # The 'tainted' attribute is automatically set to 'True' # if the dataset required for an answer test is missing - # (see can_run_ds() and can_run_sim()). + # (see can_run_ds()). # This logic check prevents creating a shelve with empty answers. storage_is_tainted = result_storage.get('tainted', False) if self.answer_name is None or storage_is_tainted:
@@ -281,6 +282,9 @@ def can_run_ds(ds_fn, file_check = False): return result_storage is not None def can_run_sim(sim_fn, sim_type, file_check = False): + issue_deprecation_warning("This function is no longer used in the " + + "yt project testing framework and is " + + "targeted for deprecation.") result_storage = AnswerTestingTest.result_storage if isinstance(sim_fn, SimulationTimeSeries): return result_storage is not None
@@ -965,6 +969,9 @@ def compare(self, new_result, old_result): def requires_sim(sim_fn, sim_type, big_data = False, file_check = False): + issue_deprecation_warning("This function is no longer used in the " + + "yt project testing framework and is " + + "targeted for deprecation.") def ffalse(func): return lambda: None def ftrue(func):
From 6c318dcc263ea9ef5edc8c8881f57bc6f78468df Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 10:38:37 +0100 Subject: [PATCH 049/653] Add kwargs to select calls --- yt/data_objects/data_containers.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-)
diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 4b9270c60ac..f2f98892bfa 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -340,7 +340,11 @@ def _generate_spatial_fluid(self, field, ngz): outputs.append(rv) ind = 0 # Does this work with mesh?
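#           (Editor's note, not part of the patch: the keyword form used below assumes
#            the container exposes select(selector, source=..., dest=..., offset=...)
#            and returns the number of selected cells copied into `dest`, which is why
#            the result is accumulated into `ind`.)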
with o._activate_cache(): - ind += o.select(self.selector, self[field], rv, ind) + ind += o.select( + self.selector, + source=self[field], + dest=rv, + offset=ind) else: chunks = self.index._chunk(self, "spatial", ngz = ngz) for i, chunk in enumerate(chunks): @@ -355,8 +359,9 @@ def _generate_spatial_fluid(self, field, ngz): data = gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz] ind += wogz.select( self.selector, - data, - rv, ind) + source=data, + dest=rv, + offset=ind) if accumulate: rv = uconcatenate(outputs) return rv From 236134b5d90bb5cd17bb50fbde8e57a33f742aae Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 10:42:16 +0100 Subject: [PATCH 050/653] =?UTF-8?q?ds=20=E2=86=92=20dx?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit --- yt/fields/fluid_fields.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index 25134b65f40..b421670c7ab 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -210,15 +210,15 @@ def func(field, data): field_data = data[grad_field].swapaxes(0, 2) else: field_data = data[grad_field] - ds = div_fac * data[ftype, "d%s" % ax] + dx = div_fac * data[ftype, f"d{ax}"] if ax == "theta": - ds *= data[ftype, "r"] + dx *= data[ftype, "r"] if ax == "phi": - ds *= data[ftype, "r"] * np.sin(data[ftype, "theta"]) - f = field_data[slice_3dr]/ds[slice_3d] - f -= field_data[slice_3dl]/ds[slice_3d] + dx *= data[ftype, "r"] * np.sin(data[ftype, "theta"]) + f = field_data[slice_3dr]/dx[slice_3d] + f -= field_data[slice_3dl]/dx[slice_3d] new_field = np.zeros_like(data[grad_field], dtype=np.float64) - new_field = data.ds.arr(new_field, field_data.units / ds.units) + new_field = data.ds.arr(new_field, field_data.units / dx.units) new_field[slice_3d] = f if block_reorder == 'F': From 550e887213150398aba68530831289ad8f729352 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 10:45:36 +0100 Subject: [PATCH 051/653] Return "arr" not "tmp" variable --- yt/fields/geometric_fields.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/fields/geometric_fields.py b/yt/fields/geometric_fields.py index d6c73b3b3b7..3a8ec9d55d3 100644 --- a/yt/fields/geometric_fields.py +++ b/yt/fields/geometric_fields.py @@ -93,11 +93,11 @@ def _zeros(field, data): def _ones(field, data): """Returns one for all cells""" - arr = np.ones(data.ires.shape, dtype="float64") - tmp = data.apply_units(arr, field.units) + tmp = np.ones(data.ires.shape, dtype="float64") + arr = data.apply_units(tmp, field.units) if data._spatial: - return data._reshape_vals(tmp) - return tmp + return data._reshape_vals(arr) + return arr registry.add_field(("index", "ones"), sampling_type="cell", From 0fc513160d6a7fdd63cadc34790e50149f54f049 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 10:46:11 +0100 Subject: [PATCH 052/653] Each line for its argument --- yt/geometry/oct_container.pyx | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 36d4b8dfb51..8b9e88e055a 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -806,7 +806,8 @@ cdef class OctreeContainer: np.uint8_t[:] cell_inds, np.int64_t[:] file_inds, np.int32_t[:] domains, - dict dest_fields, dict source_fields, + dict dest_fields, + dict source_fields, np.int32_t domain, np.int64_t offset = 0 ): From f7d9e4ae5ac57f064ccc511ed077df52b7df8dca Mon Sep 
17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 10:50:33 +0100 Subject: [PATCH 053/653] Minor nits in docstrings --- yt/geometry/oct_container.pyx | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 8b9e88e055a..4ea1e38da4c 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -840,13 +840,13 @@ cdef class OctreeContainer: def file_index_octs_with_ghost_zones( self, SelectorObject selector, int domain_id, int num_cells = -1): - """Similar as file_index_octs, but return as well the level, cell index, - file index and domain of the neighbouring cells. + """Similar as file_index_octs, but returns the level, cell index, + file index and domain of the neighbouring cells as well. Arguments --------- selector : SelectorObject - The selector object. It is expected to select all cells for a selected oct. + The selector object. It is expected to select all cells for a given oct. domain_id : int The domain to select. Set to -1 to select all domains. num_cells : int, optional From 9ca0d0e13af6a4348660fa74b9ba17154680d363 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 10:50:59 +0100 Subject: [PATCH 054/653] Using python-friendly syntax --- yt/geometry/oct_container.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 4ea1e38da4c..ea8c64058f8 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -825,8 +825,8 @@ cdef class OctreeContainer: dest = dest_fields[key] source = source_fields[key] count = 0 - for i in range(levels.shape[0]): - if levels[i] != level or domains[i] != domain: continue + for i, (lev, dom) in enumerate(zip(levels, domains)): + if lev != level or dom != domain: continue count += 1 if file_inds[i] < 0: dest[i + offset] = np.nan From a92d4778323beea20ec798cb43b6157173208dbc Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 10:55:40 +0100 Subject: [PATCH 055/653] Remove duplicates in testing file --- yt/frontends/ramses/tests/test_outputs.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 1538344e694..38d491cab71 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -399,7 +399,6 @@ def check_unit(array, unit): check_unit(ds.r[('gas','Electron_number_density')],'cm**(-3)') -ramses_rt = "ramses_rt_00088/output_00088/info_00088.txt" @requires_file(ramses_rt) def test_ramses_mixed_files(): # Test that one can use derived fields that depend on different @@ -462,7 +461,6 @@ def test_magnetic_field_aliasing(): assert ('gas',field) in ds.derived_field_list ad[('gas',field)] -output_00080 = "output_00080/info_00080.txt" @requires_file(output_00080) def test_field_accession(): ds = yt.load(output_00080) From 0bf76b2808ab00471a1129c96ca34ec782f65d6f Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 11:00:02 +0100 Subject: [PATCH 056/653] _block_reorder -> _block_order --- yt/data_objects/octree_subset.py | 6 +++--- yt/fields/fluid_fields.py | 8 +++++--- yt/frontends/ramses/data_structures.py | 2 +- 3 files changed, 9 insertions(+), 7 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index dbce321e657..c66359bf943 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -35,7 
+35,7 @@ class OctreeSubset(YTSelectionContainer): _con_args = ('base_region', 'domain', 'ds') _domain_offset = 0 _cell_count = -1 - _block_reorder = None + _block_order = 'C' def __init__(self, base_region, domain, ds, over_refine_factor = 1, num_ghost_zones = 0): super(OctreeSubset, self).__init__(ds, None) @@ -456,8 +456,8 @@ def __init__(self, ind, block_slice): def __getitem__(self, key): bs = self.block_slice rv = bs.octree_subset[key][:,:,:,self.ind].T - if bs.octree_subset._block_reorder: - rv = rv.copy(order=bs.octree_subset._block_reorder) + if bs.octree_subset._block_order: + rv = rv.copy(order=bs.octree_subset._block_order) return rv @property diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index b421670c7ab..44af8c961ad 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -205,8 +205,10 @@ def grad_func(axi, ax): slice_3dl = slice_3d[:axi] + (sl_left,) + slice_3d[axi+1:] slice_3dr = slice_3d[:axi] + (sl_right,) + slice_3d[axi+1:] def func(field, data): - block_reorder = getattr(data, '_block_reorder', None) - if block_reorder == 'F': + block_order = data._block_order + if block_order == 'F': + # Fortran-ordering: we need to swap axes here and + # reswap below field_data = data[grad_field].swapaxes(0, 2) else: field_data = data[grad_field] @@ -221,7 +223,7 @@ def func(field, data): new_field = data.ds.arr(new_field, field_data.units / dx.units) new_field[slice_3d] = f - if block_reorder == 'F': + if block_order == 'F': new_field = new_field.swapaxes(0, 2) return new_field diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 1e24bdfbc7f..217f5192621 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -181,7 +181,7 @@ def included(self, selector): class RAMSESDomainSubset(OctreeSubset): _domain_offset = 1 - _block_reorder = "F" + _block_order = "F" _base_domain = None From 19058d8c88fbde284dae230232acb24043e66929 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 11:03:43 +0100 Subject: [PATCH 057/653] Field detector does not have a block_order argument --- yt/fields/fluid_fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index 44af8c961ad..ddb2d0fe257 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -205,7 +205,7 @@ def grad_func(axi, ax): slice_3dl = slice_3d[:axi] + (sl_left,) + slice_3d[axi+1:] slice_3dr = slice_3d[:axi] + (sl_right,) + slice_3d[axi+1:] def func(field, data): - block_order = data._block_order + block_order = getattr(data, '_block_order', 'C') if block_order == 'F': # Fortran-ordering: we need to swap axes here and # reswap below From 9645cb0dabcb36dabf493ce243416648e258e258 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 2 Jul 2020 09:31:13 -0500 Subject: [PATCH 058/653] Allow in_conda_env to look for conda-forge --- setupext.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setupext.py b/setupext.py index 92c456babe0..4d3efa3ccb0 100644 --- a/setupext.py +++ b/setupext.py @@ -132,7 +132,7 @@ def check_for_pyembree(): return os.path.dirname(fn) def in_conda_env(): - return any(s in sys.version for s in ("Anaconda", "Continuum")) + return any(s in sys.version for s in ("Anaconda", "Continuum", "conda-forge")) def read_embree_location(): ''' From 6d2b43978f53e8425953270f20418ef5c0229483 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 3 Jul 2020 09:30:56 +0100 Subject: 
[PATCH 059/653] Reorder if necessary --- yt/data_objects/octree_subset.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index c66359bf943..cd006c1be5b 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -455,9 +455,9 @@ def __init__(self, ind, block_slice): def __getitem__(self, key): bs = self.block_slice - rv = bs.octree_subset[key][:,:,:,self.ind].T - if bs.octree_subset._block_order: - rv = rv.copy(order=bs.octree_subset._block_order) + rv = np.require( + bs.octree_subset[key][:,:,:,self.ind].T, + requirements=bs.octree_subset._block_order) return rv @property From 3c8b75ccf6d2a84588c487ccdbdd68fda6cfc631 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 17 Jul 2020 10:01:31 +0100 Subject: [PATCH 060/653] Take comments into account --- yt/frontends/ramses/data_structures.py | 25 +++++++++++++------ yt/frontends/ramses/io_utils.pyx | 1 + yt/geometry/coordinates/coordinate_handler.py | 4 +-- yt/geometry/oct_container.pyx | 21 +++++++--------- yt/geometry/oct_visitors.pxd | 14 +++++------ yt/geometry/oct_visitors.pyx | 24 ++++++++++++++++-- 6 files changed, 58 insertions(+), 31 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a0f1babab14..47ff2992790 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -1,7 +1,6 @@ import os import numpy as np import weakref -import warnings from collections import defaultdict from glob import glob @@ -193,10 +192,15 @@ def __init__(self, base_region, domain, ds, over_refine_factor=1, num_ghost_zone if num_ghost_zones > 0: if not all(ds.periodicity): - warnings.warn('Ghost zones will wrongly assume the domain to be periodic.') + mylog.warn('Ghost zones will wrongly assume the domain to be periodic.') # Create a base domain *with no self._base_domain.fwidth base_domain = RAMSESDomainSubset(ds.all_data(), domain, ds, over_refine_factor) self._base_domain = base_domain + elif num_ghost_zones < 0: + raise RuntimeError( + 'Cannot initialize a domain subset with a negative number of ghost zones,' + ' was called with num_ghost_zones=%s' % num_ghost_zones + ) def _fill_no_ghostzones(self, fd, fields, selector, file_handler): ndim = self.ds.dimensionality @@ -214,9 +218,10 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler): # Initializing data container for field in fields: tr[field] = np.zeros(cell_count, 'float64') + cpu_list = [self.domain_id - 1] fill_hydro(fd, file_handler.offset, file_handler.level_count, - [self.domain_id-1], + cpu_list, levels, cell_inds, file_inds, ndim, all_fields, fields, tr, oct_handler) @@ -240,9 +245,10 @@ def _fill_with_ghostzones(self, fd, fields, selector, file_handler, num_ghost_zo # Initializing data container for field in fields: tr[field] = np.zeros(cell_count, 'float64') + cpu_list = list(range(ncpu)) fill_hydro(fd, file_handler.offset, file_handler.level_count, - list(range(ncpu)), + cpu_list, levels, cell_inds, file_inds, ndim, all_fields, fields, tr, oct_handler, @@ -255,6 +261,9 @@ def fwidth(self): if self._num_ghost_zones > 0: fwidth = fwidth.reshape(-1, 8, 3) n_oct = fwidth.shape[0] + # new_fwidth contains the fwidth of the oct+ghost zones + # this is a constant array in each oct, so we simply copy + # the oct value using numpy fancy-indexing new_fwidth = np.zeros((n_oct, self.nz**3, 3), dtype=fwidth.dtype) new_fwidth[:, :, :] = fwidth[:, 0:1, :] fwidth = 
new_fwidth.reshape(-1, 3) @@ -269,12 +278,12 @@ def fcoords(self): oh = self.oct_handler indices = oh.fill_index(self.selector).reshape(-1, 8) - oinds, cinds = oh.fill_octcellindex_neighbours(self.selector) + oct_inds, cell_inds = oh.fill_octcellindex_neighbours(self.selector) - oinds = oinds.reshape(-1, 64) - cinds = cinds.reshape(-1, 64) + oct_inds = oct_inds.reshape(-1, 64) + cell_inds = cell_inds.reshape(-1, 64) - inds = indices[oinds, cinds] + inds = indices[oct_inds, cell_inds] fcoords = self.ds.arr( oh.fcoords(self.selector)[inds].reshape(-1, 3), diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 39d4d86d43d..21abdb1609e 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -172,6 +172,7 @@ def fill_hydro(FortranFile f, f.seek(offset) tmp = {} # Initalize temporary data container for io + # note: we use Fortran ordering to reflect the in-file ordering for field in all_fields: tmp[field] = np.empty((nc, twotondim), dtype="float64", order='F') diff --git a/yt/geometry/coordinates/coordinate_handler.py b/yt/geometry/coordinates/coordinate_handler.py index bb24accfad2..d5521e804ac 100644 --- a/yt/geometry/coordinates/coordinate_handler.py +++ b/yt/geometry/coordinates/coordinate_handler.py @@ -7,7 +7,7 @@ fix_unitary, \ iterable from yt.units.yt_array import \ - YTArray, YTQuantity, unyt_array + YTArray, YTQuantity from yt.utilities.exceptions import \ YTCoordinateNotImplemented, \ YTInvalidWidthError @@ -213,7 +213,7 @@ def sanitize_width(self, axis, width, depth): width = (w[0], w[1]) elif iterable(width): width = validate_iterable_width(width, self.ds) - elif isinstance(width, (YTQuantity, unyt_array)): + elif isinstance(width, YTQuantity): width = (width, width) elif isinstance(width, Number): width = (self.ds.quan(width, 'code_length'), diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index a6498556aff..db2e13490bf 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -664,13 +664,10 @@ cdef class OctreeContainer: cdef np.ndarray[np.int64_t, ndim=1] file_inds if num_cells < 0: num_cells = selector.count_oct_cells(self, domain_id) - levels = np.zeros(num_cells, dtype="uint8") - file_inds = np.zeros(num_cells, dtype="int64") - cell_inds = np.zeros(num_cells, dtype="uint8") - for i in range(num_cells): - levels[i] = 255 - file_inds[i] = -1 - cell_inds[i] = 8 + # Initialize variables with dummy values + levels = np.full(num_cells, 255, dtype="uint8") + file_inds = np.full(num_cells, -1, dtype="int64") + cell_inds = np.full(num_cells, 8, dtype="uint8") cdef oct_visitors.FillFileIndicesO visitor_o cdef oct_visitors.FillFileIndicesR visitor_r if self.fill_style == "r": @@ -735,8 +732,8 @@ cdef class OctreeContainer: for key in dest_fields: dest = dest_fields[key] source = source_fields[key] - for i in range(levels.shape[0]): - if levels[i] != level: continue + for i, lvl in enumerate(levels): + if lvl != level: continue if file_inds[i] < 0: dest[i + offset] = np.nan else: @@ -774,7 +771,7 @@ cdef class OctreeContainer: ------- oct_inds : int64 ndarray (nocts*8, ) The on-domain index of the octs containing each cell - cell_inds : uint8 array (nocts*8, ) + cell_inds : uint8 ndarray (nocts*8, ) The index of the cell in its parent oct Note @@ -786,8 +783,8 @@ cdef class OctreeContainer: cdef NeighbourCellIndexVisitor visitor - cdef np.uint8_t[:] cell_inds - cdef np.int64_t[:] oct_inds + cdef np.uint8_t[::1] cell_inds + cdef np.int64_t[::1] oct_inds cell_inds = 
np.full(num_octs*4**3, 8, dtype=np.uint8) oct_inds = np.full(num_octs*4**3, -1, dtype=np.int64) diff --git a/yt/geometry/oct_visitors.pxd b/yt/geometry/oct_visitors.pxd index 7a77a653299..63920480cf6 100644 --- a/yt/geometry/oct_visitors.pxd +++ b/yt/geometry/oct_visitors.pxd @@ -151,7 +151,7 @@ cdef class StoreIndex(OctVisitor): cdef class BaseNeighbourVisitor(OctVisitor): cdef int idim # 0,1,2 for x,y,z cdef int direction # +1 for +x, -1 for -x - cdef np.int8_t[:] neigh_ind + cdef np.uint8_t neigh_ind[3] cdef bint other_oct cdef Oct *neighbour cdef OctreeContainer octree @@ -164,12 +164,12 @@ cdef class BaseNeighbourVisitor(OctVisitor): return (((self.neigh_ind[2]*d)+self.neigh_ind[1])*d+self.neigh_ind[0]) cdef class NeighbourCellIndexVisitor(BaseNeighbourVisitor): - cdef np.uint8_t[:] cell_inds - cdef np.int64_t[:] domain_inds + cdef np.uint8_t[::1] cell_inds + cdef np.int64_t[::1] domain_inds cdef class NeighbourCellVisitor(BaseNeighbourVisitor): - cdef np.uint8_t[:] levels - cdef np.int64_t[:] file_inds - cdef np.uint8_t[:] cell_inds - cdef np.int32_t[:] domains + cdef np.uint8_t[::1] levels + cdef np.int64_t[::1] file_inds + cdef np.uint8_t[::1] cell_inds + cdef np.int32_t[::1] domains diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index 220da61f014..cd951d26271 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -353,7 +353,9 @@ cdef class StoreIndex(OctVisitor): cdef class BaseNeighbourVisitor(OctVisitor): def __init__(self, OctreeContainer octree, int domain_id = -1): self.octree = octree - self.neigh_ind = np.zeros(3, np.int8) + self.neigh_ind[0] = 0 + self.neigh_ind[1] = 0 + self.neigh_ind[2] = 0 super(BaseNeighbourVisitor, self).__init__(octree, domain_id) @cython.boundscheck(False) @@ -418,6 +420,25 @@ cdef class BaseNeighbourVisitor(OctVisitor): # Store neighbouring cell index in current cell cdef class NeighbourCellIndexVisitor(BaseNeighbourVisitor): + # This piece of code is very much optimizable. Here are possible routes to achieve + # much better performance: + + # - Work oct by oct, which would reduce the number of neighbor lookup + # from 4³=64 to 3³=27, + # - Use faster neighbor lookup method(s). For now, all searches are started from + # the root mesh down to leaf nodes, but we could instead go up the tree from the + # central oct then down to find all neighbors (see e.g. + # https://geidav.wordpress.com/2017/12/02/advanced-octrees-4-finding-neighbor-nodes/). + # - Pre-compute the face-neighbors of all octs. + + # Note that for the last point, algorithms exist that generate the neighbors of all + # octs in O(1) time (https://link.springer.com/article/10.1007/s13319-015-0060-9) + # during the octree construction. + # Another possible solution would be to keep a unordered hash map of all the octs + # indexed by their (3-integers) position. With such structure, finding a neighbor + # takes O(1) time. This could even come as a replacement of the current + # pointer-based octree structure. 
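# A minimal pure-Python sketch of that last idea -- a hash map keyed on the
# integer position of each oct, so that a face-neighbour query becomes a single
# O(1) dictionary lookup. The names and layout below are purely illustrative
# and are not the structures yt actually uses:

def iter_face_neighbours(octs_by_pos, ix, iy, iz, level):
    # octs_by_pos is assumed to map (ix, iy, iz, level) tuples to octs
    n = 1 << level  # octs per side at this refinement level (periodic box assumed)
    for dx, dy, dz in ((1, 0, 0), (-1, 0, 0), (0, 1, 0),
                       (0, -1, 0), (0, 0, 1), (0, 0, -1)):
        key = ((ix + dx) % n, (iy + dy) % n, (iz + dz) % n, level)
        yield octs_by_pos.get(key)  # None when the neighbour lives on a coarser level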
+ @cython.boundscheck(False) @cython.wraparound(False) @cython.initializedcheck(False) @@ -506,4 +527,3 @@ cdef class NeighbourCellVisitor(BaseNeighbourVisitor): self.domains[self.index] = neigh_domain self.index += 1 - From 13e5dfad9fa9f73e5ef87dd0a901c45fcc50222b Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 17 Jul 2020 10:06:26 +0100 Subject: [PATCH 061/653] Inform Cython about strides --- yt/frontends/ramses/io_utils.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 21abdb1609e..1b29199e176 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -106,8 +106,8 @@ cpdef read_offset(FortranFile f, INT64_t min_level, INT64_t domain_id, INT64_t n offset = np.full((ncpu_and_bound, n_levels), -1, dtype=np.int64) level_count = np.zeros((ncpu_and_bound, n_levels), dtype=np.int64) - cdef np.int64_t[:,:] level_count_view = level_count - cdef np.int64_t[:,:] offset_view = offset + cdef np.int64_t[:, ::1] level_count_view = level_count + cdef np.int64_t[:, ::1] offset_view = offset for ilevel in range(nlevelmax): for icpu in range(ncpu_and_bound): From 89c2b6cbef527b49fba7ebb537dced011d16779f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 18 Jul 2020 10:48:41 +0200 Subject: [PATCH 062/653] add .git-blame-ignore-revs with the list of all automated commits to be ignored by git blame --- .git-blame-ignore-revs | 8 ++++++++ 1 file changed, 8 insertions(+) create mode 100644 .git-blame-ignore-revs diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 00000000000..f5a1bce97b2 --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,8 @@ +# transition to isort +7edfcee093cca277307aabdb180e0ffc69768291 + +# transisiton to black +ebadee629414aed2c7b6526e22a419205329ec38 + +# automated trailing whitespace removal +3ee548b04a41dfbc009921c492fba6a0682651ca From 38530494b8b6f7eedf66eb6e09838ab3ae5e5159 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 19 May 2020 11:22:14 +0200 Subject: [PATCH 063/653] ci: add precommit hooks for flake8, isort and black --- .pre-commit-config.yaml | 13 +++++++++++++ 1 file changed, 13 insertions(+) create mode 100644 .pre-commit-config.yaml diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000000..c5648aa48aa --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,13 @@ +- repo: https://gitlab.com/pycqa/flake8 + rev: '' + hooks: + - id: flake8 +- repo: https://github.com/timothycrosley/isort + rev: '' + hooks: + - id: isort +- repo: https://github.com/ambv/black + rev: 19.10b0 + hooks: + - id: black + language_version: python3.7 From b1995407092950c600d40d6974f692acb8d8187e Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 18 Jul 2020 13:13:16 +0100 Subject: [PATCH 064/653] tr -> data --- yt/frontends/ramses/data_structures.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 5b827340cd2..8e75ea137aa 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -224,7 +224,7 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler): oct_handler = self.oct_handler all_fields = [f for ft, f in file_handler.field_list] fields = [f for ft, f in fields] - tr = {} + data = {} cell_count = selector.count_oct_cells(self.oct_handler, self.domain_id) 
levels, cell_inds, file_inds = self.oct_handler.file_index_octs( @@ -233,7 +233,7 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler): # Initializing data container for field in fields: - tr[field] = np.zeros(cell_count, "float64") + data[field] = np.zeros(cell_count, "float64") cpu_list = [self.domain_id - 1] fill_hydro( @@ -247,10 +247,10 @@ def _fill_no_ghostzones(self, fd, fields, selector, file_handler): ndim, all_fields, fields, - tr, + data, oct_handler, ) - return tr + return data def _fill_with_ghostzones( self, fd, fields, selector, file_handler, num_ghost_zones From 8ecce954c0414911a3986b1d9b7268b8dbf27d86 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 18 Jul 2020 13:18:48 +0100 Subject: [PATCH 065/653] Make isort happy --- yt/frontends/ramses/data_structures.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 8e75ea137aa..a862424f114 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -2,6 +2,7 @@ import weakref from collections import defaultdict from glob import glob + import numpy as np from yt.arraytypes import blankRecordArray From a3719425c25c80d5e840752e0a990347a1460b5d Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 18 Jul 2020 13:37:45 +0100 Subject: [PATCH 066/653] Fix black travis --- .travis.yml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.travis.yml b/.travis.yml index 433b2b286a5..505b073bf23 100644 --- a/.travis.yml +++ b/.travis.yml @@ -85,6 +85,9 @@ jobs: - stage: Lint python: 3.6 script: isort --check-only -rc yt/ + + - stage: Lint + python: 3.6 script: black --check yt/ - stage: tests From 038b35dcac094701776eb999b2a3187acfdfe6c8 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 18 Jul 2020 13:41:38 +0100 Subject: [PATCH 067/653] Add names to stages --- .travis.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.travis.yml b/.travis.yml index 505b073bf23..afc364c02f8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -79,6 +79,7 @@ install: jobs: include: - stage: Lint + name: "flake8" python: 3.6 script: flake8 yt/ From ce68d3e41981ee8ee3adde9d3b61667c033ee0fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 13 Jun 2020 20:07:07 +0200 Subject: [PATCH 068/653] cleanup legacy python checks and associated special cases --- .coveragerc | 1 - .pep8speaks.yml | 1 - setup.cfg | 1 - .../construction_data_containers.py | 21 +- yt/fields/tests/test_fields_plugins.py | 8 +- yt/frontends/exodus_ii/util.py | 8 +- yt/frontends/sdf/data_structures.py | 3 - yt/frontends/tipsy/data_structures.py | 4 - yt/funcs.py | 3 +- yt/utilities/io_handler.py | 4 +- yt/utilities/lodgeit.py | 11 +- yt/utilities/lru_cache.py | 188 ------------------ yt/utilities/poster/streaminghttp.py | 23 +-- yt/utilities/tests/test_config.py | 9 +- yt/visualization/plot_window.py | 5 +- 15 files changed, 25 insertions(+), 265 deletions(-) delete mode 100644 yt/utilities/lru_cache.py diff --git a/.coveragerc b/.coveragerc index 1a4aca30108..2dea13250ef 100644 --- a/.coveragerc +++ b/.coveragerc @@ -13,7 +13,6 @@ omit=*.yml yt/mods.py yt/utilities/fits_image.py yt/utilities/lodgeit.py - yt/utilities/lru_cache.py yt/utilities/poster/* yt/visualization/_mpl_imports.py diff --git a/.pep8speaks.yml b/.pep8speaks.yml index c225b1cd6e6..3538cb33772 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -75,7 +75,6 @@ pycodestyle: - \*/__config__.py - yt/visualization/_mpl_imports.py - 
yt/utilities/lodgeit.py - - yt/utilities/lru_cache.py - yt/utilities/poster/\* - yt/extern/\* - yt/mods.py diff --git a/setup.cfg b/setup.cfg index 6a80be350ae..a6db023776d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -25,7 +25,6 @@ exclude = doc, yt/visualization/_mpl_imports.py, yt/utilities/fits_image.py, yt/utilities/lodgeit.py, - yt/utilities/lru_cache.py, yt/mods.py, yt/visualization/_colormap_data.py, diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 868631fc345..03635a90c20 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -1,7 +1,6 @@ import fileinput import io import os -import sys import zipfile from functools import wraps from re import finditer @@ -1878,18 +1877,16 @@ def _color_samples_obj( em = np.log10(em) if color_field is not None: if color_field_min is None: - if sys.version_info > (3,): - cs = [float(field) for field in cs] - cs = np.array(cs) + cs = [float(field) for field in cs] + cs = np.array(cs) mi = cs.min() else: mi = color_field_min if color_log: mi = np.log10(mi) if color_field_max is None: - if sys.version_info > (3,): - cs = [float(field) for field in cs] - cs = np.array(cs) + cs = [float(field) for field in cs] + cs = np.array(cs) ma = cs.max() else: ma = color_field_max @@ -1907,18 +1904,16 @@ def _color_samples_obj( # now, get emission if emit_field is not None: if emit_field_min is None: - if sys.version_info > (3,): - em = [float(field) for field in em] - em = np.array(em) + em = [float(field) for field in em] + em = np.array(em) emi = em.min() else: emi = emit_field_min if emit_log: emi = np.log10(emi) if emit_field_max is None: - if sys.version_info > (3,): - em = [float(field) for field in em] - em = np.array(em) + em = [float(field) for field in em] + em = np.array(em) ema = em.max() else: ema = emit_field_max diff --git a/yt/fields/tests/test_fields_plugins.py b/yt/fields/tests/test_fields_plugins.py index 4b7431f251f..0cdacfd25a6 100644 --- a/yt/fields/tests/test_fields_plugins.py +++ b/yt/fields/tests/test_fields_plugins.py @@ -1,5 +1,4 @@ import os -import sys import unittest import yt @@ -75,12 +74,9 @@ def testCustomField(self): plugin_file = os.path.join(CONFIG_DIR, ytcfg.get("yt", "pluginfilename")) msg = "INFO:yt:Loading plugins from %s" % plugin_file - if sys.version_info >= (3, 4, 0): - with self.assertLogs("yt", level="INFO") as cm: - yt.enable_plugins() - self.assertEqual(cm.output, [msg]) - else: + with self.assertLogs("yt", level="INFO") as cm: yt.enable_plugins() + self.assertEqual(cm.output, [msg]) ds = fake_random_ds(16) dd = ds.all_data() diff --git a/yt/frontends/exodus_ii/util.py b/yt/frontends/exodus_ii/util.py index 307d32edcc0..7ce4464bb60 100644 --- a/yt/frontends/exodus_ii/util.py +++ b/yt/frontends/exodus_ii/util.py @@ -1,13 +1,10 @@ import re import string -import sys from collections import OrderedDict from itertools import takewhile import numpy as np -_printable = set([ord(_) for _ in string.printable]) - def get_num_pseudo_dims(coords): D = coords.shape[1] @@ -15,9 +12,8 @@ def get_num_pseudo_dims(coords): def sanitize_string(s): - if sys.version_info > (3,): - return "".join([chr(_) for _ in takewhile(lambda a: a in _printable, s)]) - return "".join([_ for _ in takewhile(lambda a: a in string.printable, s)]) + _printable = set([ord(_) for _ in string.printable]) + return "".join([chr(_) for _ in takewhile(lambda a: a in _printable, s)]) def load_info_records(info_records): diff --git 
a/yt/frontends/sdf/data_structures.py b/yt/frontends/sdf/data_structures.py index 4bdc48b8e40..0466a4556bb 100644 --- a/yt/frontends/sdf/data_structures.py +++ b/yt/frontends/sdf/data_structures.py @@ -1,6 +1,5 @@ import contextlib import os -import sys import numpy as np @@ -15,8 +14,6 @@ @contextlib.contextmanager def safeopen(*args, **kwargs): - if sys.version[0] != "3": - kwargs.pop("encoding") with open(*args, **kwargs) as f: yield f diff --git a/yt/frontends/tipsy/data_structures.py b/yt/frontends/tipsy/data_structures.py index b90899f1c39..cf1f2f8db43 100644 --- a/yt/frontends/tipsy/data_structures.py +++ b/yt/frontends/tipsy/data_structures.py @@ -1,7 +1,6 @@ import glob import os import struct -import sys import numpy as np @@ -14,9 +13,6 @@ from .fields import TipsyFieldInfo -if sys.version_info > (3,): - long = int - class TipsyFile(ParticleFile): def __init__(self, ds, io, filename, file_id, range=None): diff --git a/yt/funcs.py b/yt/funcs.py index c69ff1cfdd7..bd8d27456fd 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -18,7 +18,7 @@ import urllib.parse import urllib.request import warnings -from functools import wraps +from functools import lru_cache, wraps from math import ceil, floor from numbers import Number as numeric_type @@ -29,7 +29,6 @@ from yt.units import YTArray, YTQuantity from yt.utilities.exceptions import YTInvalidWidthError from yt.utilities.logger import ytLogger as mylog -from yt.utilities.lru_cache import lru_cache # Some functions for handling sequences and other types diff --git a/yt/utilities/io_handler.py b/yt/utilities/io_handler.py index d759f9ba879..01dfe17acce 100644 --- a/yt/utilities/io_handler.py +++ b/yt/utilities/io_handler.py @@ -1,11 +1,11 @@ import os from collections import defaultdict from contextlib import contextmanager +from functools import _make_key, lru_cache import numpy as np from yt.geometry.selection_routines import GridSelector -from yt.utilities.lru_cache import _make_key, local_lru_cache from yt.utilities.on_demand_imports import _h5py as h5py io_registry = {} @@ -25,7 +25,7 @@ def __init__(cls, name, b, d): if hasattr(cls, "_dataset_type"): io_registry[cls._dataset_type] = cls if use_caching and hasattr(cls, "_read_obj_field"): - cls._read_obj_field = local_lru_cache( + cls._read_obj_field = lru_cache( maxsize=use_caching, typed=True, make_key=_make_io_key )(cls._read_obj_field) diff --git a/yt/utilities/lodgeit.py b/yt/utilities/lodgeit.py index 010c7f98233..3d5d8e4e20a 100644 --- a/yt/utilities/lodgeit.py +++ b/yt/utilities/lodgeit.py @@ -31,9 +31,6 @@ import sys from optparse import OptionParser -if sys.version_info >= (3, 0, 0): - unicode = str - SCRIPT_NAME = os.path.basename(sys.argv[0]) VERSION = "0.3" SERVICE_URL = "http://paste.yt-project.org/" @@ -92,14 +89,14 @@ def load_default_settings(): def make_utf8(text, encoding): """Convert a text to UTF-8, brute-force.""" try: - u = unicode(text, "utf-8") + u = str(text, "utf-8") uenc = "utf-8" except UnicodeError: try: - u = unicode(text, encoding) + u = str(text, encoding) uenc = "utf-8" except UnicodeError: - u = unicode(text, "iso-8859-15", "ignore") + u = str(text, "iso-8859-15", "ignore") uenc = "iso-8859-15" try: import chardet @@ -108,7 +105,7 @@ def make_utf8(text, encoding): d = chardet.detect(text) if d["encoding"] == uenc: return u.encode("utf-8") - return unicode(text, d["encoding"], "ignore").encode("utf-8") + return str(text, d["encoding"], "ignore").encode("utf-8") def get_xmlrpc_service(): diff --git a/yt/utilities/lru_cache.py b/yt/utilities/lru_cache.py 
deleted file mode 100644 index 71ee1d6fcbe..00000000000 --- a/yt/utilities/lru_cache.py +++ /dev/null @@ -1,188 +0,0 @@ -import sys -from collections import namedtuple -from functools import update_wrapper -from threading import RLock - -_CacheInfo = namedtuple("CacheInfo", ["hits", "misses", "maxsize", "currsize"]) - - -class _HashedSeq(list): - __slots__ = "hashvalue" - - def __init__(self, tup, hash=hash): - self[:] = tup - self.hashvalue = hash(tup) - - def __hash__(self): - return self.hashvalue - - -def _make_key( - args, - kwds, - typed, - kwd_mark=(object(),), - fasttypes=set((int, str, frozenset, type(None))), - sorted=sorted, - tuple=tuple, - type=type, - len=len, -): - "Make a cache key from optionally typed positional and keyword arguments" - key = args - if kwds: - sorted_items = sorted(kwds.items()) - key += kwd_mark - for item in sorted_items: - key += item - if typed: - key += tuple(type(v) for v in args) - if kwds: - key += tuple(type(v) for k, v in sorted_items) - elif len(key) == 1 and type(key[0]) in fasttypes: - return key[0] - return _HashedSeq(key) - - -def lru_cache(maxsize=100, typed=False, make_key=_make_key): - """Least-recently-used cache decorator. - If *maxsize* is set to None, the LRU features are disabled and the cache - can grow without bound. - If *typed* is True, arguments of different types will be cached separately. - For example, f(3.0) and f(3) will be treated as distinct calls with - distinct results. - Arguments to the cached function must be hashable. - View the cache statistics named tuple (hits, misses, maxsize, currsize) with - f.cache_info(). Clear the cache and statistics with f.cache_clear(). - Access the underlying function with f.__wrapped__. - See: - """ - - # Users should only access the lru_cache through its public API: - # cache_info, cache_clear, and f.__wrapped__ - # The internals of the lru_cache are encapsulated for thread safety and - # to allow the implementation to change (including a possible C version). 
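# For reference, the standard-library functools.lru_cache that replaces this
# deleted backport exposes the same public interface described above
# (maxsize, typed, cache_info, cache_clear, __wrapped__). A small,
# self-contained usage sketch of the stdlib version (illustrative only,
# not yt code):

from functools import lru_cache

@lru_cache(maxsize=128, typed=True)
def squared(x):
    return x * x

squared(3)
squared(3.0)                 # cached separately from squared(3) because typed=True
print(squared.cache_info())  # CacheInfo(hits=0, misses=2, maxsize=128, currsize=2)
squared.cache_clear()        # empties the cache and resets the statistics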
- - def decorating_function(user_function): - - cache = dict() - stats = [0, 0] # make statistics updateable non-locally - HITS, MISSES = 0, 1 # names for the stats fields - cache_get = cache.get # bound method to lookup key or return None - _len = len # localize the global len() function - lock = RLock() # because linkedlist updates aren't threadsafe - root = [] # root of the circular doubly linked list - root[:] = [root, root, None, None] # initialize by pointing to self - nonlocal_root = [root] # make updateable non-locally - PREV, NEXT, KEY, RESULT = 0, 1, 2, 3 # names for the link fields - - if maxsize == 0: - - def wrapper(*args, **kwds): - # no caching, just do a statistics update after a successful call - result = user_function(*args, **kwds) - stats[MISSES] += 1 - return result - - elif maxsize is None: - - def wrapper(*args, **kwds): - # simple caching without ordering or size limit - key = make_key(args, kwds, typed) - result = cache_get( - key, root - ) # root used here as a unique not-found sentinel - if result is not root: - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - cache[key] = result - stats[MISSES] += 1 - return result - - else: - - def wrapper(*args, **kwds): - # size limited caching that tracks accesses by recency - try: - key = make_key(args, kwds, typed) if kwds or typed else args - except TypeError: - stats[MISSES] += 1 - return user_function(*args, **kwds) - with lock: - link = cache_get(key) - if link is not None: - # record recent use of the key by moving it to the front of the list - (root,) = nonlocal_root - link_prev, link_next, key, result = link - link_prev[NEXT] = link_next - link_next[PREV] = link_prev - last = root[PREV] - last[NEXT] = root[PREV] = link - link[PREV] = last - link[NEXT] = root - stats[HITS] += 1 - return result - result = user_function(*args, **kwds) - with lock: - (root,) = nonlocal_root - if key in cache: - # getting here means that this same key was added to the - # cache while the lock was released. since the link - # update is already done, we need only return the - # computed result and update the count of misses. 
- pass - elif _len(cache) >= maxsize: - # use the old root to store the new key and result - oldroot = root - oldroot[KEY] = key - oldroot[RESULT] = result - # empty the oldest link and make it the new root - root = nonlocal_root[0] = oldroot[NEXT] - oldkey = root[KEY] - oldvalue = root[RESULT] - root[KEY] = root[RESULT] = None - # now update the cache dictionary for the new links - del cache[oldkey] - cache[key] = oldroot - else: - # put result in a new link at the front of the list - last = root[PREV] - link = [last, root, key, result] - last[NEXT] = root[PREV] = cache[key] = link - stats[MISSES] += 1 - return result - - def cache_info(): - """Report cache statistics""" - with lock: - return _CacheInfo(stats[HITS], stats[MISSES], maxsize, len(cache)) - - def cache_clear(): - """Clear the cache and cache statistics""" - with lock: - cache.clear() - root = nonlocal_root[0] - root[:] = [root, root, None, None] - stats[:] = [0, 0] - - wrapper.__wrapped__ = user_function - wrapper.cache_info = cache_info - wrapper.cache_clear = cache_clear - return update_wrapper(wrapper, user_function) - - return decorating_function - - -### End of backported lru_cache - -local_lru_cache = lru_cache - -if sys.version_info[:2] >= (3, 3): - # 3.2 has an lru_cache with an incompatible API - from functools import lru_cache - -try: - from fastcache import clru_cache as lru_cache -except ImportError: - pass diff --git a/yt/utilities/poster/streaminghttp.py b/yt/utilities/poster/streaminghttp.py index 2d60fe5ba43..24443094017 100644 --- a/yt/utilities/poster/streaminghttp.py +++ b/yt/utilities/poster/streaminghttp.py @@ -32,21 +32,6 @@ import sys import urllib - -def request_has_data(req): - if sys.version_info >= (3, 0, 0): - return hasattr(req, "data") - else: - return req.has_data() - - -def request_get_data(req): - if sys.version_info >= (3, 0, 0): - return req.data - else: - return req.get_data() - - __all__ = [ "StreamingHTTPConnection", "StreamingHTTPRedirectHandler", @@ -182,8 +167,8 @@ def http_request(self, req): if we're using an iterable value""" # Make sure that if we're using an iterable object as the request # body, that we've also specified Content-Length - if request_has_data(req): - data = request_get_data(req) + if hasattr(req, "data"): + data = req.data if hasattr(data, "read") or hasattr(data, "next"): if not req.has_header("Content-length"): raise ValueError("No Content-Length specified for iterable body") @@ -208,8 +193,8 @@ def https_open(self, req): def https_request(self, req): # Make sure that if we're using an iterable object as the request # body, that we've also specified Content-Length - if request_has_data(req): - data = request_get_data(req) + if hasattr(req, "data")(req): + data = req.data if hasattr(data, "read") or hasattr(data, "next"): if not req.has_header("Content-length"): raise ValueError( diff --git a/yt/utilities/tests/test_config.py b/yt/utilities/tests/test_config.py index d8744a10358..9e5d0a0ca5f 100644 --- a/yt/utilities/tests/test_config.py +++ b/yt/utilities/tests/test_config.py @@ -2,6 +2,7 @@ import os import sys import unittest +import unittest.mock as mock from configparser import NoOptionError from io import StringIO @@ -10,14 +11,6 @@ from yt.config import _OLD_CONFIG_FILE, CONFIG_DIR, CURRENT_CONFIG_FILE, YTConfigParser from yt.fields.tests.test_fields_plugins import TEST_PLUGIN_FILE -if sys.version_info.major < 3: - try: - import mock - except ImportError: - mock = None -else: - import unittest.mock as mock - _TEST_PLUGIN = "_test_plugin.py" _DUMMY_CFG = 
["[yt]", "loglevel = 49", "pluginfilename = " + _TEST_PLUGIN] diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 010b4ec1b27..fb2799fa715 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -57,10 +57,7 @@ # Some magic for dealing with pyparsing being included or not # included in matplotlib (not in gentoo, yes in everything else) try: - if sys.version_info[0] == 3: - from matplotlib.pyparsing_py3 import ParseFatalException - else: - from matplotlib.pyparsing_py2 import ParseFatalException + from matplotlib.pyparsing_py3 import ParseFatalException except ImportError: from pyparsing import ParseFatalException From e2dd33cf9f4569a652202716e90e7edb0875f2a5 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 18 Jul 2020 20:47:43 +0100 Subject: [PATCH 069/653] Set names --- .travis.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.travis.yml b/.travis.yml index afc364c02f8..5822e795176 100644 --- a/.travis.yml +++ b/.travis.yml @@ -84,10 +84,12 @@ jobs: script: flake8 yt/ - stage: Lint + name: "isort" python: 3.6 script: isort --check-only -rc yt/ - stage: Lint + name: "black" python: 3.6 script: black --check yt/ From c033f18b37ac634693ba1d4b7d9834c61176a50a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 18 Jul 2020 20:48:47 +0100 Subject: [PATCH 070/653] Fix flaking --- yt/data_objects/data_containers.py | 1 - 1 file changed, 1 deletion(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 1e3b543ff16..e9a0f5cf6ac 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -368,7 +368,6 @@ def _generate_spatial_fluid(self, field, ngz): np.empty(wogz.ires.size, dtype="float64"), units ) outputs.append(rv) - data = gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz] ind += wogz.select( self.selector, source=gz[field][ngz:-ngz, ngz:-ngz, ngz:-ngz], From daa6750b3af6bfd893bb2bdd66851a8e4118106e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 5 Jul 2020 08:51:03 +0200 Subject: [PATCH 071/653] fix: support automatic slice expansion for 2D datasets --- yt/data_objects/region_expression.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/region_expression.py b/yt/data_objects/region_expression.py index 1a166a70ead..0934118a29e 100644 --- a/yt/data_objects/region_expression.py +++ b/yt/data_objects/region_expression.py @@ -41,8 +41,8 @@ def __getitem__(self, item): # possible use case of this would be where we supply something # like ds.r[::256j] . This would be expanded, implicitly into # ds.r[::256j, ::256j, ::256j]. Other cases would be if we do - # ds.r[0.1:0.9] where it will be expanded along three dimensions. - item = (item, item, item) + # ds.r[0.1:0.9] where it will be expanded along all dimensions. + item = tuple(item for _ in range(self.ds.dimensionality)) if len(item) != self.ds.dimensionality: # Not the right specification, and we don't want to do anything # implicitly. 
Note that this happens *after* the implicit expansion From 01f14efc91cfedae0573457554c8c65612108608 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 5 Jul 2020 10:27:51 +0200 Subject: [PATCH 072/653] refactor: EAFP pattern in RegionExpression.__getitem__ --- yt/data_objects/region_expression.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/yt/data_objects/region_expression.py b/yt/data_objects/region_expression.py index 0934118a29e..40f8548d6d8 100644 --- a/yt/data_objects/region_expression.py +++ b/yt/data_objects/region_expression.py @@ -2,7 +2,7 @@ from yt.funcs import obj_length from yt.units.yt_array import YTQuantity -from yt.utilities.exceptions import YTDimensionalityError +from yt.utilities.exceptions import YTDimensionalityError, YTFieldNotParseable from yt.visualization.line_plot import LineBuffer from .data_containers import _get_ipython_key_completion @@ -24,12 +24,11 @@ def __getitem__(self, item): # At first, we will only implement this as accepting a slice that is # (optionally) unitful corresponding to a specific set of coordinates # that result in a rectangular prism or a slice. - if isinstance(item, str): - # This is some field; we will instead pass this back to the - # all_data object. - return self.all_data[item] - if isinstance(item, tuple) and isinstance(item[1], str): + try: return self.all_data[item] + except (TypeError, YTFieldNotParseable): + pass + if isinstance(item, slice): if obj_length(item.start) == 3 and obj_length(item.stop) == 3: # This is for a ray that is not orthogonal to an axis. From 743251fd961564ed947a5d076231e307d9e3e3df Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Sun, 19 Jul 2020 14:49:13 -0500 Subject: [PATCH 073/653] Reformat with black --- yt/utilities/answer_testing/framework.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 9d96ead4fd7..a2c49344516 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -29,9 +29,7 @@ from yt.data_objects.static_output import Dataset from yt.data_objects.time_series import SimulationTimeSeries from yt.funcs import get_pbar -from yt.funcs import \ - get_pbar, \ - issue_deprecation_warning +from yt.funcs import get_pbar, issue_deprecation_warning from yt.testing import ( assert_allclose_units, assert_almost_equal, @@ -320,9 +318,11 @@ def can_run_ds(ds_fn, file_check=False): def can_run_sim(sim_fn, sim_type, file_check=False): - issue_deprecation_warning("This function is no longer used in the " + - "yt project testing framework and is " + - "targeted for deprecation.") + issue_deprecation_warning( + "This function is no longer used in the " + + "yt project testing framework and is " + + "targeted for deprecation." + ) result_storage = AnswerTestingTest.result_storage if isinstance(sim_fn, SimulationTimeSeries): return result_storage is not None @@ -1090,9 +1090,12 @@ def compare(self, new_result, old_result): def requires_sim(sim_fn, sim_type, big_data=False, file_check=False): - issue_deprecation_warning("This function is no longer used in the " + - "yt project testing framework and is " + - "targeted for deprecation.") + issue_deprecation_warning( + "This function is no longer used in the " + + "yt project testing framework and is " + + "targeted for deprecation." 
+ ) + def ffalse(func): return lambda: None From 76c3b8c5ed44aab0c9e2625203114981715bafa8 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Fri, 10 Jul 2020 10:55:50 -0500 Subject: [PATCH 074/653] Bump test deps to hit py38 wheels --- tests/test_requirements.txt | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index 244ca8f6784..cd077e33c8e 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -1,15 +1,15 @@ -astropy==3.0.5 +astropy==3.2.3 codecov==2.0.15 -coverage==4.5.1 +coverage==4.5.4 fastcache==1.0.2 glueviz==0.13.3 h5py==2.10.0 ipython==7.1.1 matplotlib==3.1.3 mock==2.0.0; python_version < '3.0' -nose-timer==0.7.3 +nose-timer==1.0.0 nose==1.3.7 -pandas==0.23.4 +pandas==0.25.3 pytest~=5.2 requests==2.20.0 scipy==1.3.3 From 91f9d80394833608ce49e10d2f44f04da0e92179 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sat, 11 Jul 2020 19:07:32 -0500 Subject: [PATCH 075/653] Only antialias if overlap is significant^{TM} --- yt/utilities/lib/pixelization_routines.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index 61d586509dc..fb20e0f89e3 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -245,7 +245,7 @@ def pixelize_cartesian(np.float64_t[:,:] buff, # conservative about the iteration indices. # This will reduce artifacts if we ever move to # compositing instead of replacing bitmaps. - if overlap1 * overlap2 == 0.0: continue + if overlap1 * overlap2 < 1.e-6: continue buff[i,j] += (dsp * overlap1) * overlap2 else: buff[i,j] = dsp From 4b84d906dee7f59929a8c52b4fbefff6076938b1 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Tue, 14 Jul 2020 12:33:36 -0500 Subject: [PATCH 076/653] Generate new answers for enzo_p and plot_window --- tests/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index 5249c5794ff..893ee953a73 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -44,7 +44,7 @@ answer_tests: - yt/frontends/enzo/tests/test_outputs.py:test_ecp - yt/frontends/enzo/tests/test_outputs.py:test_nuclei_density_fields - local_enzo_p_007: + local_enzo_p_008: # PR 2735 - yt/frontends/enzo_p/tests/test_outputs.py:test_hello_world - yt/frontends/enzo_p/tests/test_outputs.py:test_particle_fields @@ -86,7 +86,7 @@ answer_tests: - yt/frontends/owls/tests/test_outputs.py:test_snapshot_033 - yt/frontends/owls/tests/test_outputs.py:test_OWLS_particlefilter - local_pw_029: + local_pw_030: # PR 2735 - yt/visualization/tests/test_plotwindow.py:test_attributes - yt/visualization/tests/test_plotwindow.py:test_attributes_wt - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers From 4d7026be16631703e9a018524354cb46da00b73c Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sat, 18 Jul 2020 11:37:37 -0500 Subject: [PATCH 077/653] Avoid updating brew to save some time --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 433b2b286a5..a4a067cd2b8 100644 --- a/.travis.yml +++ b/.travis.yml @@ -30,7 +30,7 @@ before_install: else sudo mkdir -p /usr/local/man sudo chown -R "${USER}:admin" /usr/local/man - brew update + # brew update # do not update to save build time brew install python ccache hdf5 proj geos openmpi netcdf brew 
uninstall gdal postgis numpy export PATH=/usr/local/opt/ccache/libexec:$PATH From 7ac2111535d7f4fd23df807a73ee1b1b89490550 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sat, 18 Jul 2020 12:07:23 -0500 Subject: [PATCH 078/653] Try to fix wheels cache on OSX --- .travis.yml | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/.travis.yml b/.travis.yml index a4a067cd2b8..e6c24d2549e 100644 --- a/.travis.yml +++ b/.travis.yml @@ -68,7 +68,7 @@ install: # pyproject.toml in cartopy. # These versions are pinned, so we will need to update/remove them when # the hack is no longer necessary. - $PIP install numpy==1.18.1 cython==0.29.6 + $PIP install numpy==1.18.1 cython==0.29.14 CFLAGS="$CFLAGS -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H" $PIP install -r tests/test_requirements.txt fi $PIP install -e . @@ -116,14 +116,11 @@ jobs: name: "MacOS: Unit Tests" os: osx osx_image: xcode10.1 - language: generic # https://github.com/travis-ci/travis-ci/issues/2312 + language: shell env: PIP=pip3 cache: - pip: false - directories: + - directories: - $HOME/Library/Caches/pip - # `cache` does not support `env`-like `global` so copy-paste from top - - $HOME/.ccache # https://github.com/travis-ci/travis-ci/issues/5853 script: nosetests -c nose_unit.cfg after_success: From 8ec54a178575220d2247958ce3af54ad3c19f0f3 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sat, 18 Jul 2020 13:32:43 -0500 Subject: [PATCH 079/653] Bump mpl to get eps/ps speed-ups --- tests/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index cd077e33c8e..c4cb9c1a38d 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -5,7 +5,7 @@ fastcache==1.0.2 glueviz==0.13.3 h5py==2.10.0 ipython==7.1.1 -matplotlib==3.1.3 +matplotlib==3.3.0 mock==2.0.0; python_version < '3.0' nose-timer==1.0.0 nose==1.3.7 From 726e2b7bf5ec1948b850471d4cdaf5fdfdb09a73 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sat, 18 Jul 2020 23:14:58 -0500 Subject: [PATCH 080/653] Specify format for Image.save --- yt/utilities/png_writer.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/png_writer.py b/yt/utilities/png_writer.py index e4e62853ed9..e5314d5e932 100644 --- a/yt/utilities/png_writer.py +++ b/yt/utilities/png_writer.py @@ -13,7 +13,7 @@ def call_png_write_png(buffer, fileobj, dpi): try: _png.write_png(buffer, fileobj, dpi) except NameError: - Image.fromarray(buffer).save(fileobj, dpi=(dpi, dpi)) + Image.fromarray(buffer).save(fileobj, dpi=(dpi, dpi), format="png") def write_png(buffer, filename, dpi=100): From 97f96ed7cf5976703085ec86ef6da70d0f28518a Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 19 Jul 2020 09:04:35 -0500 Subject: [PATCH 081/653] Fix the test_config for a tiny terminal width --- yt/utilities/tests/test_config.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/yt/utilities/tests/test_config.py b/yt/utilities/tests/test_config.py index 9e5d0a0ca5f..388fc8aad33 100644 --- a/yt/utilities/tests/test_config.py +++ b/yt/utilities/tests/test_config.py @@ -82,12 +82,18 @@ def testConfigCommands(self): if mock is None: return + def remove_spaces_and_breaks(s): + return "".join(s.split()) + self.assertFalse(os.path.exists(CURRENT_CONFIG_FILE)) info = self._runYTConfig(["--help"]) self.assertEqual(info["rc"], 0) self.assertEqual(info["stderr"], "") - self.assertIn("Get and set configuration 
values for yt", info["stdout"]) + self.assertIn( + remove_spaces_and_breaks("Get and set configuration values for yt"), + remove_spaces_and_breaks(info["stdout"]), + ) info = self._runYTConfig(["list"]) self.assertEqual(info["rc"], 0) From b99f8effa371af51a967d5deb6af6d5491cfa751 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 19 Jul 2020 09:46:23 -0500 Subject: [PATCH 082/653] Don't generate apidoc for deprecated analysis modules --- doc/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/Makefile b/doc/Makefile index 342b956aee5..76095419c1b 100644 --- a/doc/Makefile +++ b/doc/Makefile @@ -49,7 +49,7 @@ html: ifneq ($(READTHEDOCS),True) SPHINX_APIDOC_OPTIONS=members,undoc-members,inherited-members,show-inheritance sphinx-apidoc \ -o source/reference/api/ \ - -e ../yt ../yt/extern* $(shell find ../yt -name "*tests*" -type d) ../yt/utilities/voropp* + -e ../yt ../yt/extern* $(shell find ../yt -name "*tests*" -type d) ../yt/utilities/voropp* ../yt/analysis_modules/* endif $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html @echo From 89a39b8c775c99c6fd2a4466135f302659844b3e Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 19 Jul 2020 10:07:21 -0500 Subject: [PATCH 083/653] Remove a bazillion of MatplotlibDeprecationWarnings --- yt/funcs.py | 12 ++++++++---- yt/visualization/base_plot_types.py | 4 +++- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/yt/funcs.py b/yt/funcs.py index bd8d27456fd..57757faf448 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -18,6 +18,7 @@ import urllib.parse import urllib.request import warnings +from distutils.version import LooseVersion from functools import lru_cache, wraps from math import ceil, floor from numbers import Number as numeric_type @@ -1265,10 +1266,13 @@ def matplotlib_style_context(style_name=None, after_reset=False): available, returns a dummy context manager. 
""" if style_name is None: - style_name = { - "mathtext.fontset": "cm", - "mathtext.fallback_to_cm": True, - } + import matplotlib + + style_name = {"mathtext.fontset": "cm"} + if LooseVersion(matplotlib.__version__) >= LooseVersion("3.3.0"): + style_name["mathtext.fallback"] = "cm" + else: + style_name["mathtext.fallback_to_cm"] = True try: import matplotlib.style diff --git a/yt/visualization/base_plot_types.py b/yt/visualization/base_plot_types.py index 4b64ac2f130..4bfc142857a 100644 --- a/yt/visualization/base_plot_types.py +++ b/yt/visualization/base_plot_types.py @@ -134,7 +134,9 @@ def save(self, name, mpl_kwargs=None, canvas=None): if mpl_kwargs is None: mpl_kwargs = {} - if "papertype" not in mpl_kwargs: + if "papertype" not in mpl_kwargs and LooseVersion( + matplotlib.__version__ + ) < LooseVersion("3.3.0"): mpl_kwargs["papertype"] = "auto" suffix = get_image_suffix(name) From f657f19713323914750a67e0fb65efcdaaf4ac46 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 19 Jul 2020 12:19:44 -0500 Subject: [PATCH 084/653] Bump answers for Travis --- answer-store | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/answer-store b/answer-store index fe17f9b706d..414c76d4ac5 160000 --- a/answer-store +++ b/answer-store @@ -1 +1 @@ -Subproject commit fe17f9b706d4bee227afb0fa2cd41df0049ae924 +Subproject commit 414c76d4ac5679b2706be11feb6d05ce31996ff9 From fddd88ce294f699c17692274e8f08c6a95d9b59b Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 19 Jul 2020 14:26:49 -0500 Subject: [PATCH 085/653] Disable Eagle test until yt-project/yt#2645 is merged --- tests/tests.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/tests.yaml b/tests/tests.yaml index 893ee953a73..484cad2eaf6 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -162,5 +162,6 @@ other_tests: unittests: - '--exclude=test_mesh_slices' # disable randomly failing test - '--exclude=test_load_from_path' # py2 + - '--exclude=test_Snipshot' # until PR 2645 is merged cookbook: - 'doc/source/cookbook/tests/test_cookbook.py' From 24a7343fe060a17e543a3085a68aac594a358e4f Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 19 Jul 2020 21:38:12 -0500 Subject: [PATCH 086/653] Avoid enumerate in Cython, define type of all vars. 
Fixes #2766 --- yt/geometry/oct_container.pyx | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index db2e13490bf..3baf3f43fb6 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -727,12 +727,13 @@ cdef class OctreeContainer: np.int64_t offset = 0): cdef np.ndarray[np.float64_t, ndim=2] source cdef np.ndarray[np.float64_t, ndim=1] dest - cdef int i + cdef int i, lvl for key in dest_fields: dest = dest_fields[key] source = source_fields[key] - for i, lvl in enumerate(levels): + for i in range(levels.shape[0]): + lvl = levels[i] if lvl != level: continue if file_inds[i] < 0: dest[i + offset] = np.nan @@ -819,13 +820,16 @@ cdef class OctreeContainer: """ cdef np.ndarray[np.float64_t, ndim=2] source cdef np.ndarray[np.float64_t, ndim=1] dest - cdef int i, count + cdef int i, count, lev + cdef np.int32_t dom for key in dest_fields: dest = dest_fields[key] source = source_fields[key] count = 0 - for i, (lev, dom) in enumerate(zip(levels, domains)): + for i in range(levels.shape[0]): + lev = levels[i] + dom = domains[i] if lev != level or dom != domain: continue count += 1 if file_inds[i] < 0: From eb56722a352ee300256c443f03c114c1c2516f2e Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 19 Jul 2020 23:00:46 -0500 Subject: [PATCH 087/653] __command_line needs to be set before importing yt.startup_tasks --- yt/utilities/command_line.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index a548f91b588..de9d42783ae 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -1,3 +1,4 @@ +# isort: skip-file import argparse import base64 import getpass @@ -28,7 +29,7 @@ mylog, update_hg_or_git, ) -from yt.startup_tasks import parser, subparsers + from yt.utilities.configure import set_config from yt.utilities.exceptions import ( YTCommandRequiresModule, @@ -38,7 +39,12 @@ from yt.utilities.metadata import get_metadata from yt.visualization.plot_window import ProjectionPlot, SlicePlot -ytcfg["yt", "__command_line"] = "True" +# isort: off +# This needs to be set before importing startup_tasks +ytcfg["yt", "__command_line"] = "True" # isort: skip +from yt.startup_tasks import parser, subparsers # isort: skip # noqa: E402 + +# isort: on # loading field plugins for backward compatibility, since this module # used to do "from yt.mods import *" From cb10722b72b454899c6cf96730888755051c3e56 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 1 Jul 2020 14:39:35 +0100 Subject: [PATCH 088/653] Decrease code duplication in RAMSES frontend --- yt/frontends/ramses/data_structures.py | 2 +- yt/frontends/ramses/field_handlers.py | 90 ++++++++++------- yt/frontends/ramses/particle_handlers.py | 123 ++++------------------- 3 files changed, 71 insertions(+), 144 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a862424f114..3f716955220 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -66,7 +66,7 @@ def __init__(self, ds, domain_id): # Autodetect particle files particle_handlers = [ - PH(ds, self) for PH in get_particle_handlers() if PH.any_exist(ds) + PH(self) for PH in get_particle_handlers() if PH.any_exist(ds) ] self.particle_handlers = particle_handlers for ph in particle_handlers: diff --git a/yt/frontends/ramses/field_handlers.py 
b/yt/frontends/ramses/field_handlers.py index c076e3c585f..21bb3fdebf2 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -19,7 +19,6 @@ def register_field_handler(ph): FIELD_HANDLERS.add(ph) -PRESENT_FIELD_FILES = {} DETECTED_FIELDS = {} @@ -33,35 +32,13 @@ def __new__(meta, name, bases, class_dict): cls = type.__new__(meta, name, bases, class_dict) if cls.ftype is not None: register_field_handler(cls) - return cls - - -class FieldFileHandler(metaclass=RAMSESFieldFileHandlerRegistry): - """ - Abstract class to handle particles in RAMSES. Each instance - represents a single file (one domain). - - To add support to a new particle file, inherit from this class and - implement all functions containing a `NotImplementedError`. - - See `SinkParticleFileHandler` for an example implementation.""" - - # These properties are static properties - ftype = None # The name to give to the field type - fname = None # The name of the file(s) - attrs = None # The attributes of the header - known_fields = None # A list of tuple containing the field name and its type - config_field = None # Name of the config section (if any) - file_descriptor = None # The name of the file descriptor (if any) + cls._unique_registry = {} + return cls - # These properties are computed dynamically - field_offsets = None # Mapping from field to offset in file - field_types = ( - None # Mapping from field to the type of the data (float, integer, ...) - ) - def __init__(self, domain): +class HandlerMixin: + def setup_handler(self, domain): """ Initalize an instance of the class. This automatically sets the full path to the file. This is not intended to be @@ -70,9 +47,9 @@ def __init__(self, domain): If you need more flexibility, rewrite this function to your need in the inherited class. """ + self.ds = ds = domain.ds self.domain = domain self.domain_id = domain.domain_id - ds = domain.ds basename = os.path.abspath(ds.root_folder) iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) @@ -92,17 +69,23 @@ def __init__(self, domain): self.fname = full_path else: raise FileNotFoundError( - "Could not find fluid file (type: %s). Tried %s" - % (self.ftype, full_path) + "Could not find %s file (type: %s). Tried %s" + % (self._file_type, self.ftype, full_path) ) if self.file_descriptor is not None: - self.file_descriptor = os.path.join(basename, self.file_descriptor) + if ds.num_groups > 0: + # The particle file descriptor is *only* in the first group + self.file_descriptor = os.path.join( + basename, "group_00001", self.file_descriptor + ) + else: + self.file_descriptor = os.path.join(basename, self.file_descriptor) @property def exists(self): """ - This function should return True if the *file* for the domain + This function should return True if the *file* the instance exists. It is called for each file of the type found on the disk. @@ -112,7 +95,7 @@ def exists(self): return os.path.exists(self.fname) @property - def has_part_descriptor(self): + def has_descriptor(self): """ This function should return True if a *file descriptor* exists. @@ -125,9 +108,9 @@ def has_part_descriptor(self): @classmethod def any_exist(cls, ds): """ - This function should return True if the kind of field + This function should return True if the kind of particle represented by the class exists in the dataset. It takes as - argument the class itself - not an instance - and a dataset. + argument the class itself -not an instance- and a dataset. 
Arguments --------- @@ -139,8 +122,8 @@ def any_exist(cls, ds): the RAMSES Dataset structure to determine if the particle type (e.g. regular particles) exists. """ - if (ds.unique_identifier, cls.ftype) in PRESENT_FIELD_FILES: - return PRESENT_FIELD_FILES[(ds.unique_identifier, cls.ftype)] + if ds.unique_identifier in cls._unique_registry: + return cls._unique_registry[ds.unique_identifier] iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) @@ -148,10 +131,41 @@ def any_exist(cls, ds): os.path.split(ds.parameter_filename)[0], cls.fname.format(iout=iout, icpu=1) ) exists = os.path.exists(fname) - PRESENT_FIELD_FILES[(ds.unique_identifier, cls.ftype)] = exists + cls._unique_registry[ds.unique_identifier] = exists return exists + +class FieldFileHandler(HandlerMixin, metaclass=RAMSESFieldFileHandlerRegistry): + """ + Abstract class to handle particles in RAMSES. Each instance + represents a single file (one domain). + + To add support to a new particle file, inherit from this class and + implement all functions containing a `NotImplementedError`. + + See `SinkParticleFileHandler` for an example implementation.""" + + _file_type = "field" + + # These properties are static properties + ftype = None # The name to give to the field type + fname = None # The name of the file(s) + attrs = None # The attributes of the header + known_fields = None # A list of tuple containing the field name and its type + config_field = None # Name of the config section (if any) + + file_descriptor = None # The name of the file descriptor (if any) + + # These properties are computed dynamically + field_offsets = None # Mapping from field to offset in file + field_types = ( + None # Mapping from field to the type of the data (float, integer, ...) + ) + + def __init__(self, domain): + self.setup_handler(domain) + @classmethod def detect_fields(cls, ds): """ diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 340b6a413bc..3b13d2ed85b 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -5,9 +5,9 @@ from yt.utilities.cython_fortran_utils import FortranFile from .io import _read_part_file_descriptor +from .field_handlers import HandlerMixin PARTICLE_HANDLERS = set() -PRESENT_PART_FILES = {} def get_particle_handlers(): @@ -28,18 +28,21 @@ def __new__(meta, name, bases, class_dict): cls = type.__new__(meta, name, bases, class_dict) if cls.ptype is not None: register_particle_handler(cls) - return cls + cls._unique_registry = {} + return cls -class ParticleFileHandler(metaclass=RAMSESParticleFileHandlerRegistry): - """ +class ParticleFileHandler(HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry): + ''' Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). To add support to a new particle file, inherit from this class and implement all functions containing a `NotImplementedError`. - See `SinkParticleFileHandler` for an example implementation.""" + See `SinkParticleFileHandler` for an example implementation.''' + + _file_type = 'particle' # These properties are static properties ptype = None # The name to give to the particle type @@ -57,49 +60,8 @@ class ParticleFileHandler(metaclass=RAMSESParticleFileHandlerRegistry): ) local_particle_count = None # The number of particle in the domain - def __init__(self, ds, domain): - """ - Initalize an instance of the class. This automatically sets - the full path to the file. 
This is not intended to be - overriden in most cases. - - If you need more flexibility, rewrite this function to your - need in the inherited class. - """ - self.ds = ds - self.domain = domain - self.domain_id = domain.domain_id - basename = os.path.abspath(ds.root_folder) - iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) - - if ds.num_groups > 0: - igroup = ((domain.domain_id - 1) // ds.group_size) + 1 - full_path = os.path.join( - basename, - "group_{:05d}".format(igroup), - self.fname.format(iout=iout, icpu=domain.domain_id), - ) - else: - full_path = os.path.join( - basename, self.fname.format(iout=iout, icpu=domain.domain_id) - ) - - if os.path.exists(full_path): - self.fname = full_path - else: - raise FileNotFoundError( - "Could not find particle file (type: %s). Tried %s" - % (self.ptype, full_path) - ) - - if self.file_descriptor is not None: - if ds.num_groups > 0: - # The particle file descriptor is *only* in the first group - self.file_descriptor = os.path.join( - basename, "group_00001", self.file_descriptor - ) - else: - self.file_descriptor = os.path.join(basename, self.file_descriptor) + def __init__(self, domain): + self.setup_handler(domain) # Attempt to read the list of fields from the config file if self.config_field and ytcfg.has_section(self.config_field): @@ -110,59 +72,6 @@ def __init__(self, ds, domain): known_fields.append((field, field_type)) self.known_fields = known_fields - @property - def exists(self): - """ - This function should return True if the *file* the instance - exists. It is called for each file of the type found on the - disk. - - By default, it just returns whether the file exists. Override - it for more complex cases. - """ - return os.path.exists(self.fname) - - @property - def has_part_descriptor(self): - """ - This function should return True if a *file descriptor* - exists. - - By default, it just returns whether the file exists. Override - it for more complex cases. - """ - return os.path.exists(self.file_descriptor) - - @classmethod - def any_exist(cls, ds): - """ - This function should return True if the kind of particle - represented by the class exists in the dataset. It takes as - argument the class itself -not an instance- and a dataset. - - Arguments - --------- - * ds: a Ramses Dataset - - Note - ---- - This function is usually called once at the initialization of - the RAMSES Dataset structure to determine if the particle type - (e.g. regular particles) exists. - """ - if (ds.unique_identifier, cls.ptype) in PRESENT_PART_FILES: - return PRESENT_PART_FILES[(ds.unique_identifier, cls.ptype)] - - iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) - - fname = os.path.join( - os.path.split(ds.parameter_filename)[0], cls.fname.format(iout=iout, icpu=1) - ) - exists = os.path.exists(fname) - PRESENT_PART_FILES[(ds.unique_identifier, cls.ptype)] = exists - - return exists - def read_header(self): """ This function is called once per file. 
It should: @@ -231,8 +140,10 @@ def read_header(self): self.local_particle_count = hvals["npart"] extra_particle_fields = self.ds._extra_particle_fields - if self.has_part_descriptor: - particle_fields = _read_part_file_descriptor(self.file_descriptor) + if self.has_descriptor: + particle_fields = ( + _read_part_file_descriptor(self.file_descriptor) + ) else: particle_fields = list(self.known_fields) @@ -345,8 +256,10 @@ def read_header(self): self.local_particle_count = hvals["nsink"] # Read the fields + add the sink properties - if self.has_part_descriptor: - fields = _read_part_file_descriptor(self.file_descriptor) + if self.has_descriptor: + fields = ( + _read_part_file_descriptor(self.file_descriptor) + ) else: fields = list(self.known_fields) From b4a9919aab4bd8e243003efec3c359efae730cd4 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Mon, 20 Jul 2020 13:44:34 -0500 Subject: [PATCH 089/653] [travis/osx] Disable homebrew update --- .travis.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index e6c24d2549e..0f221c900de 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,6 +19,7 @@ addons: env: global: PIP=pip + HOMEBREW_NO_AUTO_UPDATE=1 before_install: - | @@ -30,9 +31,8 @@ before_install: else sudo mkdir -p /usr/local/man sudo chown -R "${USER}:admin" /usr/local/man - # brew update # do not update to save build time - brew install python ccache hdf5 proj geos openmpi netcdf - brew uninstall gdal postgis numpy + HOMEBREW_NO_AUTO_UPDATE=1 brew install hdf5 proj geos open-mpi netcdf ccache + HOMEBREW_NO_AUTO_UPDATE=1 brew uninstall gdal postgis numpy # WHY? export PATH=/usr/local/opt/ccache/libexec:$PATH fi mkdir -p $HOME/.config/yt From ce123c4b37bd8bedbd84e54a300e90869f838d79 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 21:12:52 +0200 Subject: [PATCH 090/653] update isort version used by travis --- tests/lint_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index 2c65749c7f1..33da678badf 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -2,5 +2,5 @@ flake8==3.8.1 mccabe==0.6.1 pycodestyle==2.6.0 pyflakes==2.2.0 -isort==4.3 +isort~=5.1 black==19.10b0 From 81418e459f16c48d6b7a75d6ef8035dfe9651b39 Mon Sep 17 00:00:00 2001 From: convert-repo Date: Mon, 20 Jul 2020 07:31:30 +0200 Subject: [PATCH 091/653] second isort pass with isort 5.1.4 --- .../construction_data_containers.py | 10 +-- yt/data_objects/data_containers.py | 9 ++- yt/data_objects/profiles.py | 3 +- yt/data_objects/selection_data_containers.py | 4 +- yt/fields/field_detector.py | 2 +- yt/fields/field_type_container.py | 2 +- yt/frontends/artio/_artio_caller.pyx | 26 +++--- yt/frontends/fits/misc.py | 4 +- yt/frontends/ramses/io_utils.pyx | 5 +- yt/funcs.py | 5 +- .../coordinates/cartesian_coordinates.py | 4 +- yt/geometry/fake_octree.pyx | 6 +- yt/geometry/grid_container.pyx | 5 +- yt/geometry/grid_visitors.pyx | 8 +- yt/geometry/oct_container.pyx | 13 ++- yt/geometry/oct_visitors.pyx | 7 +- yt/geometry/particle_deposit.pyx | 14 ++-- yt/geometry/particle_oct_container.pyx | 71 +++++++++++------ yt/geometry/particle_smooth.pyx | 11 ++- yt/geometry/selection_routines.pyx | 33 +++++--- yt/startup_tasks.py | 2 +- yt/testing.py | 3 +- yt/utilities/amr_kdtree/amr_kdtree.py | 3 +- yt/utilities/command_line.py | 13 ++- yt/utilities/cython_fortran_utils.pyx | 6 +- yt/utilities/lib/allocation_container.pyx | 
2 + yt/utilities/lib/alt_ray_tracers.pyx | 4 +- yt/utilities/lib/amr_kdtools.pyx | 5 +- .../lib/autogenerated_element_samplers.pyx | 6 +- yt/utilities/lib/basic_octree.pyx | 13 +-- yt/utilities/lib/bitarray.pyx | 6 +- yt/utilities/lib/bounded_priority_queue.pyx | 5 +- .../lib/bounding_volume_hierarchy.pyx | 58 +++++++------- yt/utilities/lib/contour_finding.pyx | 22 +++--- yt/utilities/lib/cosmology_time.pyx | 3 + yt/utilities/lib/cykdtree/kdtree.pyx | 8 +- yt/utilities/lib/cykdtree/utils.pyx | 10 ++- yt/utilities/lib/cyoctree.pyx | 12 +-- yt/utilities/lib/depth_first_octree.pyx | 4 +- yt/utilities/lib/distance_queue.pyx | 3 + yt/utilities/lib/element_mappings.pyx | 33 ++++---- .../lib/embree_mesh/mesh_construction.pyx | 32 ++++---- .../lib/embree_mesh/mesh_intersection.pyx | 36 +++++---- .../lib/embree_mesh/mesh_samplers.pyx | 31 ++++---- .../lib/embree_mesh/mesh_traversal.pyx | 15 ++-- yt/utilities/lib/ewah_bool_wrap.pyx | 20 +++-- yt/utilities/lib/fnv_hash.pyx | 3 +- yt/utilities/lib/fortran_reader.pyx | 6 +- yt/utilities/lib/geometry_utils.pyx | 11 ++- yt/utilities/lib/grid_traversal.pyx | 35 +++++--- yt/utilities/lib/image_samplers.pyx | 55 +++++++++---- yt/utilities/lib/image_utilities.pyx | 5 +- yt/utilities/lib/interpolators.pyx | 7 +- yt/utilities/lib/lenses.pyx | 5 +- .../lib/line_integral_convolution.pyx | 4 +- yt/utilities/lib/marching_cubes.pyx | 21 +++-- yt/utilities/lib/mesh_triangulation.pyx | 6 +- yt/utilities/lib/mesh_utilities.pyx | 10 ++- yt/utilities/lib/misc_utilities.pyx | 20 ++--- yt/utilities/lib/origami.pyx | 4 +- yt/utilities/lib/particle_kdtree_tools.pyx | 25 ++---- yt/utilities/lib/particle_mesh_operations.pyx | 7 +- yt/utilities/lib/partitioned_grid.pyx | 7 +- yt/utilities/lib/pixelization_routines.pyx | 79 +++++++++++-------- yt/utilities/lib/points_in_volume.pyx | 4 +- yt/utilities/lib/primitives.pyx | 7 +- yt/utilities/lib/quad_tree.pyx | 8 +- yt/utilities/lib/ragged_arrays.pyx | 4 +- yt/utilities/lib/tests/test_geometry_utils.py | 6 +- yt/utilities/lib/write_array.pyx | 3 +- yt/utilities/lodgeit.py | 4 +- yt/utilities/minimal_representation.py | 2 +- yt/utilities/poster/encode.py | 4 +- yt/utilities/sdf.py | 2 +- yt/utilities/voropp.pyx | 8 +- yt/visualization/color_maps.py | 6 +- yt/visualization/eps_writer.py | 5 +- yt/visualization/fits_image.py | 2 +- yt/visualization/image_writer.py | 8 +- yt/visualization/mapserver/pannable_map.py | 2 +- yt/visualization/plot_modifications.py | 2 +- .../volume_rendering/input_events.py | 7 +- .../volume_rendering/interactive_loop.py | 5 +- .../volume_rendering/interactive_vr.py | 5 +- .../interactive_vr_helpers.py | 6 +- .../volume_rendering/old_camera.py | 4 +- yt/visualization/volume_rendering/scene.py | 2 +- .../volume_rendering/shader_objects.py | 4 +- .../transfer_function_helper.py | 3 +- 89 files changed, 610 insertions(+), 410 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 03635a90c20..b75518831df 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -8,7 +8,6 @@ import numpy as np -import yt.geometry.particle_deposit as particle_deposit from yt.config import ytcfg from yt.data_objects.data_containers import ( YTSelectionContainer1D, @@ -21,6 +20,7 @@ from yt.frontends.sph.data_structures import ParticleDataset from yt.frontends.stream.api import load_uniform_grid from yt.funcs import ensure_list, get_memory_usage, iterable, mylog, only_on_root +from yt.geometry 
import particle_deposit as particle_deposit from yt.geometry.coordinates.cartesian_coordinates import all_data from yt.units.unit_object import Unit from yt.units.yt_array import YTArray, uconcatenate @@ -2004,9 +2004,9 @@ def _export_obj( emit_field_min, emit_field, ) # map color values to color scheme - from yt.visualization._colormap_data import ( + from yt.visualization._colormap_data import ( # import colors for mtl file color_map_luts, - ) # import colors for mtl file + ) lut = color_map_luts[color_map] # enumerate colors # interpolate emissivity to enumerated colors @@ -2237,9 +2237,9 @@ def _export_blender( emit_field_min, emit_field, ) # map color values to color scheme - from yt.visualization._colormap_data import ( + from yt.visualization._colormap_data import ( # import colors for mtl file color_map_luts, - ) # import colors for mtl file + ) lut = color_map_luts[color_map] # enumerate colors # interpolate emissivity to enumerated colors diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index e9a0f5cf6ac..5a26fce13c5 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -10,7 +10,6 @@ from unyt.exceptions import UnitConversionError, UnitParseError import yt.geometry.selection_routines -import yt.units.dimensions as ytdims from yt.data_objects.field_data import YTFieldData from yt.data_objects.profiles import create_profile from yt.fields.derived_field import DerivedField @@ -25,6 +24,7 @@ validate_width_tuple, ) from yt.geometry.selection_routines import compose_selector +from yt.units import dimensions as ytdims from yt.units.yt_array import YTArray, YTQuantity, uconcatenate from yt.utilities.amr_kdtree.api import AMRKDTree from yt.utilities.exceptions import ( @@ -709,8 +709,9 @@ def to_glue(self, fields, label="yt", data_collection=None): the Glue environment, you can pass a *data_collection* object, otherwise Glue will be started. 
""" + from glue.core import Data, DataCollection + from yt.config import ytcfg - from glue.core import DataCollection, Data if ytcfg.getboolean("yt", "__withintesting"): from glue.core.application_base import Application as GlueApplication @@ -819,8 +820,8 @@ def create_firefly_object( ## attempt to import firefly_api try: - from firefly_api.reader import Reader from firefly_api.particlegroup import ParticleGroup + from firefly_api.reader import Reader except ImportError: raise ImportError( "Can't find firefly_api, ensure it" @@ -1979,8 +1980,8 @@ def _convert_field_name(self, field): return field def _get_pw(self, fields, center, width, origin, plot_type): - from yt.visualization.plot_window import get_window_parameters, PWViewerMPL from yt.visualization.fixed_resolution import FixedResolutionBuffer as frb + from yt.visualization.plot_window import PWViewerMPL, get_window_parameters axis = self.axis skip = self._key_fields diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index 09caad1cc84..5ae0d208b43 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -600,9 +600,10 @@ def to_dataframe(self, fields=None, only_used=False): >>> df1 = p.to_dataframe() >>> df2 = p.to_dataframe(fields="density", only_used=True) """ - import pandas as pd from collections import OrderedDict + import pandas as pd + idxs, masked, fields = self._export_prep(fields, only_used) pdata = OrderedDict([(self.x_field[-1], self.x[idxs])]) for field in fields: diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index 3c88ce77a34..2ab2ccb4f40 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -558,11 +558,11 @@ def to_pw(self, fields=None, center="c", width=None, axes_unit=None): self.fields = ensure_list(fields) + [ k for k in self.field_data.keys() if k not in self._key_fields ] + from yt.visualization.fixed_resolution import FixedResolutionBuffer from yt.visualization.plot_window import ( - get_oblique_window_parameters, PWViewerMPL, + get_oblique_window_parameters, ) - from yt.visualization.fixed_resolution import FixedResolutionBuffer (bounds, center_rot) = get_oblique_window_parameters( normal, center, width, self.ds diff --git a/yt/fields/field_detector.py b/yt/fields/field_detector.py index 0b9473c57b8..4dba1949317 100644 --- a/yt/fields/field_detector.py +++ b/yt/fields/field_detector.py @@ -190,8 +190,8 @@ def _debug(self): return def deposit(self, *args, **kwargs): - from yt.frontends.stream.data_structures import StreamParticlesDataset from yt.data_objects.static_output import ParticleDataset + from yt.frontends.stream.data_structures import StreamParticlesDataset if kwargs["method"] == "mesh_id": if isinstance(self.ds, (StreamParticlesDataset, ParticleDataset)): diff --git a/yt/fields/field_type_container.py b/yt/fields/field_type_container.py index 189a3e632de..4bf5e503446 100644 --- a/yt/fields/field_type_container.py +++ b/yt/fields/field_type_container.py @@ -121,7 +121,7 @@ def __contains__(self, obj): def _ipython_display_(self): import ipywidgets - from IPython.display import display, Markdown + from IPython.display import Markdown, display names = dir(self) names.sort() diff --git a/yt/frontends/artio/_artio_caller.pyx b/yt/frontends/artio/_artio_caller.pyx index f62b87ad2cf..bd543f97462 100644 --- a/yt/frontends/artio/_artio_caller.pyx +++ b/yt/frontends/artio/_artio_caller.pyx @@ -1,23 +1,31 @@ # distutils: sources = ARTIO_SOURCE # distutils: 
include_dirs = LIB_DIR_GEOM_ARTIO cimport cython + import numpy as np + cimport numpy as np + import sys -from yt.geometry.selection_routines cimport \ - SelectorObject, AlwaysSelector, OctreeSubsetSelector -from yt.utilities.lib.fp_utils cimport imax -from yt.geometry.oct_container cimport \ - SparseOctreeContainer, OctObjectPool -from yt.geometry.oct_visitors cimport Oct -from yt.geometry.particle_deposit cimport \ - ParticleDepositOperation -from libc.stdlib cimport malloc, free +from libc.stdlib cimport free, malloc from libc.string cimport memcpy + +from yt.geometry.oct_container cimport OctObjectPool, SparseOctreeContainer +from yt.geometry.oct_visitors cimport Oct +from yt.geometry.particle_deposit cimport ParticleDepositOperation +from yt.geometry.selection_routines cimport ( + AlwaysSelector, + OctreeSubsetSelector, + SelectorObject, +) +from yt.utilities.lib.fp_utils cimport imax + import data_structures + from yt.utilities.lib.misc_utilities import OnceIndirect + cdef extern from "platform_dep.h": ctypedef int int32_t ctypedef long long int64_t diff --git a/yt/frontends/fits/misc.py b/yt/frontends/fits/misc.py index 092aab4b221..ae985bf22fb 100644 --- a/yt/frontends/fits/misc.py +++ b/yt/frontends/fits/misc.py @@ -100,8 +100,9 @@ def create_spectral_slabs(filename, slab_centers, slab_width, **kwargs): ... nan_mask=0.0) """ from spectral_cube import SpectralCube - from yt.visualization.fits_image import FITSImageData + from yt.frontends.fits.api import FITSDataset + from yt.visualization.fits_image import FITSImageData cube = SpectralCube.read(filename) if not isinstance(slab_width, YTQuantity): @@ -157,6 +158,7 @@ def ds9_region(ds, reg, obj=None, field_parameters=None): >>> print(circle_region.quantities.extrema("flux")) """ import pyregion + from yt.frontends.fits.api import EventsFITSDataset if os.path.exists(reg): diff --git a/yt/frontends/ramses/io_utils.pyx b/yt/frontends/ramses/io_utils.pyx index 1b29199e176..6b65a214035 100644 --- a/yt/frontends/ramses/io_utils.pyx +++ b/yt/frontends/ramses/io_utils.pyx @@ -2,9 +2,12 @@ # distutils: include_dirs = LIB_DIR cimport cython cimport numpy as np + import numpy as np -from yt.utilities.cython_fortran_utils cimport FortranFile + from yt.geometry.oct_container cimport RAMSESOctreeContainer +from yt.utilities.cython_fortran_utils cimport FortranFile + from yt.utilities.exceptions import YTIllDefinedAMRData ctypedef np.int32_t INT32_t diff --git a/yt/funcs.py b/yt/funcs.py index 57757faf448..bd3462f40a4 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -528,8 +528,8 @@ def paste_traceback_detailed(exc_type, exc, tb): Should only be used in sys.excepthook. """ import cgitb - from io import StringIO import xmlrpc.client + from io import StringIO s = StringIO() handler = cgitb.Hook(format="text", file=s) @@ -892,6 +892,7 @@ def parallel_profile(prefix): ... yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass') """ import cProfile + from yt.config import ytcfg fn = "%s_%04i_%04i.cprof" % ( @@ -1105,8 +1106,8 @@ def enable_plugins(pluginfilename=None): file is shared with it. 
""" import yt + from yt.config import CONFIG_DIR, ytcfg from yt.fields.my_plugin_fields import my_plugins_fields - from yt.config import ytcfg, CONFIG_DIR if pluginfilename is not None: _fn = pluginfilename diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 50e74a57a8b..ae5d3979ce4 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -281,10 +281,10 @@ def pixelize_line(self, field, start_point, end_point, npoints): def _ortho_pixelize( self, data_source, field, bounds, size, antialias, dim, periodic ): + from yt.data_objects.construction_data_containers import YTParticleProj + from yt.data_objects.selection_data_containers import YTSlice from yt.frontends.sph.data_structures import ParticleDataset from yt.frontends.stream.data_structures import StreamParticlesDataset - from yt.data_objects.selection_data_containers import YTSlice - from yt.data_objects.construction_data_containers import YTParticleProj # We should be using fcoords field = data_source._determine_fields(field)[0] diff --git a/yt/geometry/fake_octree.pyx b/yt/geometry/fake_octree.pyx index bba59f80dc6..95dc5605243 100644 --- a/yt/geometry/fake_octree.pyx +++ b/yt/geometry/fake_octree.pyx @@ -9,14 +9,16 @@ Make a fake octree, deposit particle at every leaf """ -from libc.stdlib cimport malloc, free, rand, RAND_MAX cimport numpy as np +from libc.stdlib cimport RAND_MAX, free, malloc, rand from oct_visitors cimport cind + import numpy as np -cimport cython +cimport cython from oct_container cimport Oct, SparseOctreeContainer + # Create a balanced octree by a random walk that recursively # subdivides def create_fake_octree(SparseOctreeContainer oct_handler, diff --git a/yt/geometry/grid_container.pyx b/yt/geometry/grid_container.pyx index 32e3e039d10..0de64466f37 100644 --- a/yt/geometry/grid_container.pyx +++ b/yt/geometry/grid_container.pyx @@ -9,10 +9,13 @@ Matching points on the grid to specific grids import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np + from yt.utilities.lib.bitarray cimport bitarray + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) diff --git a/yt/geometry/grid_visitors.pyx b/yt/geometry/grid_visitors.pyx index 0ddc95e3c72..1522976c71c 100644 --- a/yt/geometry/grid_visitors.pyx +++ b/yt/geometry/grid_visitors.pyx @@ -9,11 +9,13 @@ Grid visitor functions """ -cimport numpy as np cimport cython -from libc.stdlib cimport malloc, free +cimport numpy as np +from libc.stdlib cimport free, malloc + +from yt.utilities.lib.bitarray cimport ba_get_value, ba_set_value from yt.utilities.lib.fp_utils cimport iclip -from yt.utilities.lib.bitarray cimport ba_set_value, ba_get_value + cdef void free_tuples(GridVisitorData *data) nogil: # This wipes out the tuples, which is necessary since they are diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 3baf3f43fb6..5d9a89a842f 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -12,11 +12,18 @@ Oct container cimport cython cimport numpy as np + import numpy as np -from selection_routines cimport SelectorObject, AlwaysSelector -from libc.math cimport floor, ceil -from yt.geometry.oct_visitors cimport OctPadded, NeighbourCellVisitor, StoreIndex, NeighbourCellIndexVisitor +from libc.math cimport ceil, floor +from selection_routines cimport AlwaysSelector, SelectorObject + +from yt.geometry.oct_visitors cimport ( + 
NeighbourCellIndexVisitor, + NeighbourCellVisitor, + OctPadded, + StoreIndex, +) ORDER_MAX = 20 _ORDER_MAX = ORDER_MAX diff --git a/yt/geometry/oct_visitors.pyx b/yt/geometry/oct_visitors.pyx index cd951d26271..1ad2a26081b 100644 --- a/yt/geometry/oct_visitors.pyx +++ b/yt/geometry/oct_visitors.pyx @@ -11,10 +11,13 @@ Oct visitor functions cimport cython cimport numpy as np + import numpy as np + +from libc.stdlib cimport free, malloc + +from yt.geometry.oct_container cimport OctInfo, OctreeContainer from yt.utilities.lib.fp_utils cimport * -from libc.stdlib cimport malloc, free -from yt.geometry.oct_container cimport OctreeContainer, OctInfo from yt.utilities.lib.geometry_utils cimport encode_morton_64bit # Now some visitor functions diff --git a/yt/geometry/particle_deposit.pyx b/yt/geometry/particle_deposit.pyx index bfb3c5f3749..0987df728e9 100644 --- a/yt/geometry/particle_deposit.pyx +++ b/yt/geometry/particle_deposit.pyx @@ -12,18 +12,20 @@ Particle Deposition onto Cells cimport numpy as np import numpy as np -from libc.stdlib cimport malloc, free + cimport cython -from libc.math cimport sqrt from cpython cimport PyObject -from yt.utilities.lib.fp_utils cimport * - -from oct_container cimport \ - Oct, OctreeContainer, OctInfo from cpython.array cimport array, clone from cython.view cimport memoryview as cymemview +from libc.math cimport sqrt +from libc.stdlib cimport free, malloc +from oct_container cimport Oct, OctInfo, OctreeContainer + +from yt.utilities.lib.fp_utils cimport * + from yt.utilities.lib.misc_utilities import OnceIndirect + cdef append_axes(np.ndarray arr, int naxes): if arr.ndim == naxes: return arr diff --git a/yt/geometry/particle_oct_container.pyx b/yt/geometry/particle_oct_container.pyx index f5dc1fc2d1d..373c323f238 100644 --- a/yt/geometry/particle_oct_container.pyx +++ b/yt/geometry/particle_oct_container.pyx @@ -10,39 +10,64 @@ Oct container tuned for Particles """ -from libc.stdlib cimport malloc, free, qsort +from libc.math cimport ceil, floor, fmod +from libc.stdlib cimport free, malloc, qsort from libc.string cimport memset -from libc.math cimport floor, ceil, fmod from libcpp.map cimport map from libcpp.vector cimport vector -from yt.utilities.lib.ewah_bool_array cimport \ - ewah_bool_array, ewah_bool_iterator, ewah_map, bool_array, ewah_word_type + +from yt.utilities.lib.ewah_bool_array cimport ( + bool_array, + ewah_bool_array, + ewah_bool_iterator, + ewah_map, + ewah_word_type, +) + import numpy as np -cimport numpy as np -from oct_container cimport OctreeContainer, Oct, OctInfo, ORDER_MAX, \ - SparseOctreeContainer, OctKey, OctAllocationContainer -cimport oct_visitors -from oct_visitors cimport cind, OctVisitor -from yt.utilities.lib.fp_utils cimport * -from yt.utilities.lib.geometry_utils cimport bounded_morton, \ - bounded_morton_dds, bounded_morton_relative_dds, \ - bounded_morton_split_dds, bounded_morton_split_relative_dds, \ - encode_morton_64bit, decode_morton_64bit, \ - morton_neighbors_coarse, morton_neighbors_refined -from selection_routines cimport SelectorObject, AlwaysSelector cimport cython +cimport numpy as np +cimport oct_visitors +from cpython.exc cimport PyErr_CheckSignals from cython cimport floating from cython.operator cimport dereference, preincrement -from cpython.exc cimport PyErr_CheckSignals +from oct_container cimport ( + ORDER_MAX, + Oct, + OctAllocationContainer, + OctInfo, + OctKey, + OctreeContainer, + SparseOctreeContainer, +) +from oct_visitors cimport OctVisitor, cind +from selection_routines cimport AlwaysSelector, 
SelectorObject + +from yt.utilities.lib.fp_utils cimport * +from yt.utilities.lib.geometry_utils cimport ( + bounded_morton, + bounded_morton_dds, + bounded_morton_relative_dds, + bounded_morton_split_dds, + bounded_morton_split_relative_dds, + decode_morton_64bit, + encode_morton_64bit, + morton_neighbors_coarse, + morton_neighbors_refined, +) + from collections import defaultdict + from yt.funcs import get_pbar from particle_deposit cimport gind + #from yt.utilities.lib.ewah_bool_wrap cimport \ from ..utilities.lib.ewah_bool_wrap cimport BoolArrayCollection -import struct + import os +import struct # If set to 1, ghost cells are added at the refined level reguardless of if the # coarse cell containing it is refined in the selector. @@ -52,10 +77,12 @@ DEF RefinedExternalGhosts = 1 _bitmask_version = np.uint64(5) -from ..utilities.lib.ewah_bool_wrap cimport SparseUnorderedBitmaskSet as SparseUnorderedBitmask -from ..utilities.lib.ewah_bool_wrap cimport SparseUnorderedRefinedBitmaskSet as SparseUnorderedRefinedBitmask -from ..utilities.lib.ewah_bool_wrap cimport BoolArrayCollectionUncompressed as BoolArrayColl -from ..utilities.lib.ewah_bool_wrap cimport FileBitmasks +from ..utilities.lib.ewah_bool_wrap cimport ( + BoolArrayCollectionUncompressed as BoolArrayColl, + FileBitmasks, + SparseUnorderedBitmaskSet as SparseUnorderedBitmask, + SparseUnorderedRefinedBitmaskSet as SparseUnorderedRefinedBitmask, +) ctypedef map[np.uint64_t, bool_array] CoarseRefinedSets diff --git a/yt/geometry/particle_smooth.pyx b/yt/geometry/particle_smooth.pyx index af98bb30198..b461274435c 100644 --- a/yt/geometry/particle_smooth.pyx +++ b/yt/geometry/particle_smooth.pyx @@ -10,16 +10,15 @@ Particle smoothing in cells cimport numpy as np + import numpy as np -cimport cython +cimport cython from cpython.exc cimport PyErr_CheckSignals -from libc.stdlib cimport malloc, free, realloc +from libc.math cimport cos, fabs, sin, sqrt +from libc.stdlib cimport free, malloc, realloc from libc.string cimport memmove -from libc.math cimport sqrt, fabs, sin, cos - -from oct_container cimport \ - Oct, OctreeContainer, OctInfo +from oct_container cimport Oct, OctInfo, OctreeContainer cdef void spherical_coord_setup(np.float64_t ipos[3], np.float64_t opos[3]): diff --git a/yt/geometry/selection_routines.pyx b/yt/geometry/selection_routines.pyx index 19299980002..8821e454d66 100644 --- a/yt/geometry/selection_routines.pyx +++ b/yt/geometry/selection_routines.pyx @@ -10,23 +10,30 @@ Geometry selection routines. 
import numpy as np -cimport numpy as np + cimport cython -from libc.math cimport sqrt +cimport numpy as np +cimport oct_visitors from cython cimport floating -from libc.stdlib cimport malloc, free +from libc.math cimport sqrt +from libc.stdlib cimport free, malloc + +from yt.utilities.lib.bitarray cimport ba_get_value, ba_set_value from yt.utilities.lib.fnv_hash cimport c_fnv_hash as fnv_hash -from yt.utilities.lib.fp_utils cimport fclip, iclip, fmax, fmin, imin, imax -from .oct_container cimport OctreeContainer, Oct -cimport oct_visitors +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, iclip, imax, imin +from yt.utilities.lib.geometry_utils cimport ( + bounded_morton_dds, + decode_morton_64bit, + encode_morton_64bit, + morton_neighbors_coarse, + morton_neighbors_refined, +) +from yt.utilities.lib.grid_traversal cimport sampler_function, walk_volume +from yt.utilities.lib.volume_container cimport VolumeContainer + +from .oct_container cimport Oct, OctreeContainer from .oct_visitors cimport cind -from yt.utilities.lib.volume_container cimport \ - VolumeContainer -from yt.utilities.lib.grid_traversal cimport \ - sampler_function, walk_volume -from yt.utilities.lib.bitarray cimport ba_get_value, ba_set_value -from yt.utilities.lib.geometry_utils cimport encode_morton_64bit, decode_morton_64bit, \ - bounded_morton_dds, morton_neighbors_coarse, morton_neighbors_refined + cdef extern from "math.h": double exp(double x) nogil diff --git a/yt/startup_tasks.py b/yt/startup_tasks.py index 329d7143325..21e1233984e 100644 --- a/yt/startup_tasks.py +++ b/yt/startup_tasks.py @@ -29,7 +29,7 @@ def turn_on_parallelism(): ) raise e # Now we have to turn on the parallelism from the perspective of the - # parallel_analysis_interface + # parallel_analysis_interface from yt.utilities.parallel_tools.parallel_analysis_interface import ( enable_parallelism, ) diff --git a/yt/testing.py b/yt/testing.py index ddc78a056ba..beac6e2c9eb 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -1063,9 +1063,10 @@ def run_nose( call_pdb=False, module=None, ): - from yt.utilities.on_demand_imports import _nose import sys + from yt.utilities.logger import ytLogger as mylog + from yt.utilities.on_demand_imports import _nose orig_level = mylog.getEffectiveLevel() mylog.setLevel(50) diff --git a/yt/utilities/amr_kdtree/amr_kdtree.py b/yt/utilities/amr_kdtree/amr_kdtree.py index 7c52364bee2..8edd54d3046 100644 --- a/yt/utilities/amr_kdtree/amr_kdtree.py +++ b/yt/utilities/amr_kdtree/amr_kdtree.py @@ -641,9 +641,10 @@ def count_cells(self): if __name__ == "__main__": - import yt from time import time + import yt + ds = yt.load("/Users/skillman/simulations/DD1717/DD1717") ds.index diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index de9d42783ae..23770409657 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -29,7 +29,6 @@ mylog, update_hg_or_git, ) - from yt.utilities.configure import set_config from yt.utilities.exceptions import ( YTCommandRequiresModule, @@ -931,11 +930,11 @@ def __call__(self, args): if args.ds is None: print("Could not load file.") sys.exit() - import yt.mods - import yt - import IPython + import yt + import yt.mods + local_ns = yt.mods.__dict__.copy() local_ns["ds"] = args.ds local_ns["pf"] = args.ds @@ -990,8 +989,8 @@ class YTMapserverCmd(YTCommand): """ def __call__(self, args): - from yt.visualization.mapserver.pannable_map import PannableMapServer from yt.frontends.ramses.data_structures import RAMSESDataset + from 
yt.visualization.mapserver.pannable_map import PannableMapServer # For RAMSES datasets, use the bbox feature to make the dataset load faster if RAMSESDataset._is_valid(args.ds) and args.center and args.width: @@ -1119,7 +1118,7 @@ class YTPastebinCmd(YTCommand): """ def __call__(self, args): - import yt.utilities.lodgeit as lo + from yt.utilities import lodgeit as lo lo.main( args.file, @@ -1140,7 +1139,7 @@ class YTPastebinGrabCmd(YTCommand): """ def __call__(self, args): - import yt.utilities.lodgeit as lo + from yt.utilities import lodgeit as lo lo.main(None, download=args.number) diff --git a/yt/utilities/cython_fortran_utils.pyx b/yt/utilities/cython_fortran_utils.pyx index 13d73cfb261..c2a349efa32 100644 --- a/yt/utilities/cython_fortran_utils.pyx +++ b/yt/utilities/cython_fortran_utils.pyx @@ -1,10 +1,14 @@ # distutils: libraries = STD_LIBS cimport numpy as np -import numpy as np + import cython +import numpy as np + from libc.stdio cimport * + import struct + cdef INT32_SIZE = sizeof(np.int32_t) cdef DOUBLE_SIZE = sizeof(np.float64_t) diff --git a/yt/utilities/lib/allocation_container.pyx b/yt/utilities/lib/allocation_container.pyx index 9022731e05b..b62a5fbc6f0 100644 --- a/yt/utilities/lib/allocation_container.pyx +++ b/yt/utilities/lib/allocation_container.pyx @@ -9,8 +9,10 @@ An allocation container and memory pool cimport numpy as np + import numpy as np + cdef class ObjectPool: def __cinit__(self): """This class is *not* meant to be initialized directly, but instead diff --git a/yt/utilities/lib/alt_ray_tracers.pyx b/yt/utilities/lib/alt_ray_tracers.pyx index c7ef82f8720..eeda9fe42d7 100644 --- a/yt/utilities/lib/alt_ray_tracers.pyx +++ b/yt/utilities/lib/alt_ray_tracers.pyx @@ -8,9 +8,11 @@ import numpy as np -cimport numpy as np + cimport cython cimport libc.math as math +cimport numpy as np + @cython.boundscheck(False) @cython.wraparound(False) diff --git a/yt/utilities/lib/amr_kdtools.pyx b/yt/utilities/lib/amr_kdtools.pyx index fbf39501e68..909c82da46a 100644 --- a/yt/utilities/lib/amr_kdtools.pyx +++ b/yt/utilities/lib/amr_kdtools.pyx @@ -9,10 +9,11 @@ AMR kD-Tree Cython Tools import numpy as np -cimport numpy as np + cimport cython -from libc.stdlib cimport malloc, free +cimport numpy as np from cython.view cimport array as cvarray +from libc.stdlib cimport free, malloc DEF Nch = 4 diff --git a/yt/utilities/lib/autogenerated_element_samplers.pyx b/yt/utilities/lib/autogenerated_element_samplers.pyx index 50ee066f2e1..e3e30d0fcb1 100644 --- a/yt/utilities/lib/autogenerated_element_samplers.pyx +++ b/yt/utilities/lib/autogenerated_element_samplers.pyx @@ -4,10 +4,10 @@ # yt/utilities/mesh_code_generation.py. 
-cimport cython -from libc.math cimport pow +cimport cython +from libc.math cimport pow + - @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) diff --git a/yt/utilities/lib/basic_octree.pyx b/yt/utilities/lib/basic_octree.pyx index 59a56831680..54af2ffeb49 100644 --- a/yt/utilities/lib/basic_octree.pyx +++ b/yt/utilities/lib/basic_octree.pyx @@ -10,15 +10,18 @@ A refine-by-two AMR-specific octree import numpy as np -cimport numpy as np + +cimport cython # Double up here for def'd functions +cimport numpy as np cimport numpy as cnp -cimport cython +from libc.stdlib cimport abs, free, malloc + +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, iclip, imax, imin -from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip -from libc.stdlib cimport malloc, free, abs +import sys +import time -import sys, time cdef extern from "platform_dep.h": # NOTE that size_t might not be int diff --git a/yt/utilities/lib/bitarray.pyx b/yt/utilities/lib/bitarray.pyx index 8f7c1f385c4..e1884dd2e07 100644 --- a/yt/utilities/lib/bitarray.pyx +++ b/yt/utilities/lib/bitarray.pyx @@ -8,9 +8,11 @@ Bit array functions import numpy as np -cimport numpy as np + cimport cython -from libc.stdlib cimport malloc, free +cimport numpy as np +from libc.stdlib cimport free, malloc + cdef class bitarray: diff --git a/yt/utilities/lib/bounded_priority_queue.pyx b/yt/utilities/lib/bounded_priority_queue.pyx index a035a8d3533..6dd05024d48 100644 --- a/yt/utilities/lib/bounded_priority_queue.pyx +++ b/yt/utilities/lib/bounded_priority_queue.pyx @@ -11,10 +11,11 @@ element at the beginning - this exploited to store nearest neighbour lists. import numpy as np -cimport numpy as np cimport cython -from cpython.mem cimport PyMem_Malloc, PyMem_Realloc, PyMem_Free +cimport numpy as np +from cpython.mem cimport PyMem_Free, PyMem_Malloc, PyMem_Realloc + cdef class BoundedPriorityQueue: def __cinit__(self, np.intp_t max_elements, np.intp_t pids=0): diff --git a/yt/utilities/lib/bounding_volume_hierarchy.pyx b/yt/utilities/lib/bounding_volume_hierarchy.pyx index fe40961fd92..7b7dc37c91f 100644 --- a/yt/utilities/lib/bounding_volume_hierarchy.pyx +++ b/yt/utilities/lib/bounding_volume_hierarchy.pyx @@ -3,40 +3,44 @@ # distutils: extra_compile_args = OMP_ARGS # distutils: extra_link_args = OMP_ARGS cimport cython + import numpy as np + cimport numpy as np from libc.math cimport fabs -from libc.stdlib cimport malloc, free +from libc.stdlib cimport free, malloc + from cython.parallel import parallel, prange -from .image_samplers cimport ImageSampler +from yt.utilities.lib.element_mappings cimport ( + ElementSampler, + P1Sampler3D, + Q1Sampler3D, + S2Sampler3D, + Tet2Sampler3D, + W1Sampler3D, +) +from yt.utilities.lib.primitives cimport ( + BBox, + Patch, + Ray, + TetPatch, + Triangle, + patch_bbox, + patch_centroid, + ray_bbox_intersect, + ray_patch_intersect, + ray_tet_patch_intersect, + ray_triangle_intersect, + tet_patch_bbox, + tet_patch_centroid, + triangle_bbox, + triangle_centroid, +) +from yt.utilities.lib.vec3_ops cimport L2_norm -from yt.utilities.lib.primitives cimport \ - BBox, \ - Ray, \ - ray_bbox_intersect, \ - Triangle, \ - ray_triangle_intersect, \ - triangle_centroid, \ - triangle_bbox, \ - Patch, \ - ray_patch_intersect, \ - patch_centroid, \ - patch_bbox, \ - TetPatch, \ - ray_tet_patch_intersect, \ - tet_patch_centroid, \ - tet_patch_bbox - -from yt.utilities.lib.element_mappings cimport \ - ElementSampler, \ - Q1Sampler3D, \ - P1Sampler3D, \ - W1Sampler3D, \ - S2Sampler3D, \ 
- Tet2Sampler3D +from .image_samplers cimport ImageSampler -from yt.utilities.lib.vec3_ops cimport L2_norm cdef ElementSampler Q1Sampler = Q1Sampler3D() cdef ElementSampler P1Sampler = P1Sampler3D() diff --git a/yt/utilities/lib/contour_finding.pyx b/yt/utilities/lib/contour_finding.pyx index 948fffac563..43092e4b28d 100644 --- a/yt/utilities/lib/contour_finding.pyx +++ b/yt/utilities/lib/contour_finding.pyx @@ -8,23 +8,25 @@ A two-pass contour finding algorithm """ from __future__ import print_function + import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np from cython cimport floating -from libc.stdlib cimport malloc, free, realloc +from libc.stdlib cimport free, malloc, realloc + +from yt.geometry.oct_container cimport OctInfo, OctreeContainer +from yt.geometry.oct_visitors cimport Oct from yt.utilities.lib.fp_utils cimport imax -from yt.geometry.oct_container cimport \ - OctreeContainer, OctInfo -from yt.geometry.oct_visitors cimport \ - Oct + from .amr_kdtools cimport Node -from .partitioned_grid cimport \ - PartitionedGrid -from .volume_container cimport \ - VolumeContainer, vc_index, vc_pos_index +from .partitioned_grid cimport PartitionedGrid +from .volume_container cimport VolumeContainer, vc_index, vc_pos_index + import sys + cdef inline ContourID *contour_create(np.int64_t contour_id, ContourID *prev = NULL): node = malloc(sizeof(ContourID)) diff --git a/yt/utilities/lib/cosmology_time.pyx b/yt/utilities/lib/cosmology_time.pyx index 7365621209e..4d36ed6cf3e 100644 --- a/yt/utilities/lib/cosmology_time.pyx +++ b/yt/utilities/lib/cosmology_time.pyx @@ -1,7 +1,10 @@ cimport numpy as np + import numpy as np + from libc.math cimport sqrt + cdef double dadtau(double aexp_tau,double O_mat_0,double O_vac_0,double O_k_0): cdef double aexp_tau3 = aexp_tau * aexp_tau * aexp_tau return sqrt( aexp_tau3 * (O_mat_0 + O_vac_0*aexp_tau3 + O_k_0*aexp_tau) ) diff --git a/yt/utilities/lib/cykdtree/kdtree.pyx b/yt/utilities/lib/cykdtree/kdtree.pyx index 383e901d2ca..30b4094862e 100644 --- a/yt/utilities/lib/cykdtree/kdtree.pyx +++ b/yt/utilities/lib/cykdtree/kdtree.pyx @@ -6,14 +6,14 @@ # distutils: extra_compile_args = -std=c++03 import cython import numpy as np -cimport numpy as np -from libc.stdlib cimport malloc, free -from libcpp cimport bool as cbool +cimport numpy as np from cpython cimport bool as pybool from cython.operator cimport dereference +from libc.stdint cimport int32_t, int64_t, uint32_t, uint64_t +from libc.stdlib cimport free, malloc +from libcpp cimport bool as cbool -from libc.stdint cimport uint32_t, uint64_t, int32_t, int64_t cdef class PyNode: r"""A container for leaf info. diff --git a/yt/utilities/lib/cykdtree/utils.pyx b/yt/utilities/lib/cykdtree/utils.pyx index d5dba21b9db..1b3df39d979 100644 --- a/yt/utilities/lib/cykdtree/utils.pyx +++ b/yt/utilities/lib/cykdtree/utils.pyx @@ -4,15 +4,17 @@ # distutils: language = c++ # distutils: extra_compile_args = -std=c++03 import numpy as np -cimport numpy as np + cimport cython -from libcpp.vector cimport vector -from libcpp.pair cimport pair +cimport numpy as np +from libc.stdint cimport int32_t, int64_t, uint32_t, uint64_t from libcpp cimport bool as cbool -from libc.stdint cimport uint32_t, uint64_t, int64_t, int32_t +from libcpp.pair cimport pair +from libcpp.vector cimport vector import copy + def py_max_pts(np.ndarray[np.float64_t, ndim=2] pos): r"""Get the maximum of points along each coordinate. 
diff --git a/yt/utilities/lib/cyoctree.pyx b/yt/utilities/lib/cyoctree.pyx index 8dc02c599a1..4e252dc469b 100644 --- a/yt/utilities/lib/cyoctree.pyx +++ b/yt/utilities/lib/cyoctree.pyx @@ -12,17 +12,19 @@ CyOctree building, loading and refining routines cimport numpy as np + import numpy as np + cimport cython + import struct -from libcpp.vector cimport vector -from libcpp cimport bool cimport libc.math as math -from libc.stdlib cimport malloc, free +from libc.stdlib cimport free, malloc +from libcpp cimport bool +from libcpp.vector cimport vector -from yt.geometry.particle_deposit cimport \ - kernel_func, get_kernel_func +from yt.geometry.particle_deposit cimport get_kernel_func, kernel_func ################################################################################ # OCTREE IMPLEMENTATION DETAILS # diff --git a/yt/utilities/lib/depth_first_octree.pyx b/yt/utilities/lib/depth_first_octree.pyx index 7b7b6de5a44..1e79254cee8 100644 --- a/yt/utilities/lib/depth_first_octree.pyx +++ b/yt/utilities/lib/depth_first_octree.pyx @@ -9,8 +9,10 @@ This is a recursive function to return a depth-first octree import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np + cdef class position: cdef public int output_pos, refined_pos diff --git a/yt/utilities/lib/distance_queue.pyx b/yt/utilities/lib/distance_queue.pyx index 6eb1aa9730e..ad3abf21acb 100644 --- a/yt/utilities/lib/distance_queue.pyx +++ b/yt/utilities/lib/distance_queue.pyx @@ -9,9 +9,12 @@ Distance queue implementation """ cimport numpy as np + import numpy as np + cimport cython + cdef int Neighbor_compare(void *on1, void *on2) nogil: cdef NeighborList *n1 cdef NeighborList *n2 diff --git a/yt/utilities/lib/element_mappings.pyx b/yt/utilities/lib/element_mappings.pyx index 26ef449a2cc..9b8778edcb3 100644 --- a/yt/utilities/lib/element_mappings.pyx +++ b/yt/utilities/lib/element_mappings.pyx @@ -8,24 +8,29 @@ interpolation on finite element data. """ +cimport cython cimport numpy as np from numpy cimport ndarray -cimport cython + import numpy as np + from libc.math cimport fabs -from yt.utilities.lib.autogenerated_element_samplers cimport \ - Q1Function3D, \ - Q1Jacobian3D, \ - Q1Function2D, \ - Q1Jacobian2D, \ - Q2Function2D, \ - Q2Jacobian2D, \ - W1Function3D, \ - W1Jacobian3D, \ - T2Function2D, \ - T2Jacobian2D, \ - Tet2Function3D, \ - Tet2Jacobian3D + +from yt.utilities.lib.autogenerated_element_samplers cimport ( + Q1Function2D, + Q1Function3D, + Q1Jacobian2D, + Q1Jacobian3D, + Q2Function2D, + Q2Jacobian2D, + T2Function2D, + T2Jacobian2D, + Tet2Function3D, + Tet2Jacobian3D, + W1Function3D, + W1Jacobian3D, +) + cdef extern from "platform_dep.h": double fmax(double x, double y) nogil diff --git a/yt/utilities/lib/embree_mesh/mesh_construction.pyx b/yt/utilities/lib/embree_mesh/mesh_construction.pyx index f883e0d73b8..edad12b2d3f 100644 --- a/yt/utilities/lib/embree_mesh/mesh_construction.pyx +++ b/yt/utilities/lib/embree_mesh/mesh_construction.pyx @@ -14,32 +14,28 @@ Note - this file is only used for the Embree-accelerated ray-tracer. 
import numpy as np + cimport cython -from libc.stdlib cimport malloc, free -from libc.math cimport fmax, sqrt cimport numpy as np - cimport pyembree.rtcore as rtc cimport pyembree.rtcore_geometry as rtcg -cimport pyembree.rtcore_ray as rtcr cimport pyembree.rtcore_geometry_user as rtcgu -from pyembree.rtcore cimport \ - Vertex, \ - Triangle, \ - Vec3f - +cimport pyembree.rtcore_ray as rtcr +from libc.math cimport fmax, sqrt +from libc.stdlib cimport free, malloc +from mesh_intersection cimport ( + patchBoundsFunc, + patchIntersectFunc, + tet_patchBoundsFunc, + tet_patchIntersectFunc, +) +from mesh_samplers cimport sample_hex, sample_tetra, sample_wedge from mesh_traversal cimport YTEmbreeScene -from mesh_samplers cimport \ - sample_hex, \ - sample_tetra, \ - sample_wedge -from mesh_intersection cimport \ - patchIntersectFunc, \ - patchBoundsFunc, \ - tet_patchIntersectFunc, \ - tet_patchBoundsFunc +from pyembree.rtcore cimport Triangle, Vec3f, Vertex + from yt.utilities.exceptions import YTElementTypeNotRecognized + cdef extern from "mesh_triangulation.h": enum: MAX_NUM_TRI diff --git a/yt/utilities/lib/embree_mesh/mesh_intersection.pyx b/yt/utilities/lib/embree_mesh/mesh_intersection.pyx index a130f0beaef..deec01a44f0 100644 --- a/yt/utilities/lib/embree_mesh/mesh_intersection.pyx +++ b/yt/utilities/lib/embree_mesh/mesh_intersection.pyx @@ -11,27 +11,29 @@ Note - this file is only used for the Embree-accelerated ray-tracer. """ +cimport cython +cimport numpy as np cimport pyembree.rtcore as rtc -cimport pyembree.rtcore_ray as rtcr cimport pyembree.rtcore_geometry as rtcg +cimport pyembree.rtcore_ray as rtcr +from libc.math cimport fabs, fmax, fmin, sqrt from pyembree.rtcore cimport Vec3f -cimport numpy as np -cimport cython -from libc.math cimport fabs, fmin, fmax, sqrt -from .mesh_samplers cimport sample_hex20, sample_tet10 + from yt.utilities.lib.bounding_volume_hierarchy cimport BBox -from yt.utilities.lib.primitives cimport \ - patchSurfaceFunc, \ - patchSurfaceDerivU, \ - patchSurfaceDerivV, \ - RayHitData, \ - compute_patch_hit, \ - tet_patchSurfaceFunc, \ - tet_patchSurfaceDerivU, \ - tet_patchSurfaceDerivV, \ - compute_tet_patch_hit -from yt.utilities.lib.vec3_ops cimport \ - dot, subtract, cross, distance +from yt.utilities.lib.primitives cimport ( + RayHitData, + compute_patch_hit, + compute_tet_patch_hit, + patchSurfaceDerivU, + patchSurfaceDerivV, + patchSurfaceFunc, + tet_patchSurfaceDerivU, + tet_patchSurfaceDerivV, + tet_patchSurfaceFunc, +) +from yt.utilities.lib.vec3_ops cimport cross, distance, dot, subtract + +from .mesh_samplers cimport sample_hex20, sample_tet10 @cython.boundscheck(False) diff --git a/yt/utilities/lib/embree_mesh/mesh_samplers.pyx b/yt/utilities/lib/embree_mesh/mesh_samplers.pyx index 34ae8feff65..d87f50328ea 100644 --- a/yt/utilities/lib/embree_mesh/mesh_samplers.pyx +++ b/yt/utilities/lib/embree_mesh/mesh_samplers.pyx @@ -11,24 +11,25 @@ Note - this file is only used for the Embree-accelerated ray-tracer. 
""" +cimport cython +cimport numpy as np cimport pyembree.rtcore as rtc cimport pyembree.rtcore_ray as rtcr -from pyembree.rtcore cimport Vec3f, Triangle, Vertex -from .mesh_construction cimport \ - MeshDataContainer, \ - Patch, \ - Tet_Patch -from yt.utilities.lib.primitives cimport patchSurfaceFunc, tet_patchSurfaceFunc -from yt.utilities.lib.element_mappings cimport \ - ElementSampler, \ - P1Sampler3D, \ - Q1Sampler3D, \ - S2Sampler3D, \ - W1Sampler3D, \ - Tet2Sampler3D -cimport numpy as np -cimport cython from libc.math cimport fabs, fmax +from pyembree.rtcore cimport Triangle, Vec3f, Vertex + +from yt.utilities.lib.element_mappings cimport ( + ElementSampler, + P1Sampler3D, + Q1Sampler3D, + S2Sampler3D, + Tet2Sampler3D, + W1Sampler3D, +) +from yt.utilities.lib.primitives cimport patchSurfaceFunc, tet_patchSurfaceFunc + +from .mesh_construction cimport MeshDataContainer, Patch, Tet_Patch + cdef ElementSampler Q1Sampler = Q1Sampler3D() cdef ElementSampler P1Sampler = P1Sampler3D() diff --git a/yt/utilities/lib/embree_mesh/mesh_traversal.pyx b/yt/utilities/lib/embree_mesh/mesh_traversal.pyx index 8236ea13d23..4c9211baa71 100644 --- a/yt/utilities/lib/embree_mesh/mesh_traversal.pyx +++ b/yt/utilities/lib/embree_mesh/mesh_traversal.pyx @@ -12,16 +12,21 @@ mesh source using either pyembree or the cython ray caster. cimport cython cimport numpy as np + import numpy as np -from libc.stdlib cimport malloc, free + cimport pyembree.rtcore as rtc -cimport pyembree.rtcore_ray as rtcr cimport pyembree.rtcore_geometry as rtcg +cimport pyembree.rtcore_ray as rtcr cimport pyembree.rtcore_scene as rtcs -from yt.utilities.lib.image_samplers cimport \ - ImageSampler -from cython.parallel import prange, parallel, threadid +from libc.stdlib cimport free, malloc + +from yt.utilities.lib.image_samplers cimport ImageSampler + +from cython.parallel import parallel, prange, threadid + from yt.visualization.image_writer import apply_colormap + from yt.utilities.lib.bounding_volume_hierarchy cimport BVH, Ray rtc.rtcInit(NULL) diff --git a/yt/utilities/lib/ewah_bool_wrap.pyx b/yt/utilities/lib/ewah_bool_wrap.pyx index 7ded579f5c1..8564d173a53 100644 --- a/yt/utilities/lib/ewah_bool_wrap.pyx +++ b/yt/utilities/lib/ewah_bool_wrap.pyx @@ -10,16 +10,22 @@ Wrapper for EWAH Bool Array: https://github.com/lemire/EWAHBoolArray import struct -from libcpp.map cimport map as cmap -from libcpp.map cimport map -from libcpp.algorithm cimport sort -from libc.stdlib cimport malloc, free, qsort + from cython.operator cimport dereference, preincrement +from libc.stdlib cimport free, malloc, qsort +from libcpp.algorithm cimport sort +from libcpp.map cimport map as cmap + import numpy as np -cimport numpy as np + cimport cython -from yt.utilities.lib.geometry_utils cimport \ - morton_neighbors_coarse, morton_neighbors_refined +cimport numpy as np + +from yt.utilities.lib.geometry_utils cimport ( + morton_neighbors_coarse, + morton_neighbors_refined, +) + cdef extern from "" namespace "std" nogil: Iter unique[Iter](Iter first, Iter last) diff --git a/yt/utilities/lib/fnv_hash.pyx b/yt/utilities/lib/fnv_hash.pyx index 69af4749afd..9605b063e94 100644 --- a/yt/utilities/lib/fnv_hash.pyx +++ b/yt/utilities/lib/fnv_hash.pyx @@ -8,9 +8,10 @@ Fast hashing routines import numpy as np -cimport numpy as np cimport cython +cimport numpy as np + @cython.wraparound(False) @cython.boundscheck(False) diff --git a/yt/utilities/lib/fortran_reader.pyx b/yt/utilities/lib/fortran_reader.pyx index 0cb90710a1f..1be39cb0ce8 100644 --- 
a/yt/utilities/lib/fortran_reader.pyx +++ b/yt/utilities/lib/fortran_reader.pyx @@ -9,11 +9,11 @@ Simple readers for fortran unformatted data, specifically for the Tiger code. import numpy as np -cimport numpy as np -cimport cython -from libc.stdio cimport fopen, fclose, FILE +cimport cython cimport libc.stdlib as stdlib +cimport numpy as np +from libc.stdio cimport FILE, fclose, fopen #cdef inline int imax(int i0, int i1): #if i0 > i1: return i0 diff --git a/yt/utilities/lib/geometry_utils.pyx b/yt/utilities/lib/geometry_utils.pyx index bf468d5f984..fe6fffde349 100644 --- a/yt/utilities/lib/geometry_utils.pyx +++ b/yt/utilities/lib/geometry_utils.pyx @@ -10,15 +10,18 @@ Simple integrators for the radiative transfer equation import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np from cython cimport floating -from libc.stdlib cimport malloc, free -from yt.utilities.lib.fp_utils cimport fclip, i64clip from libc.math cimport copysign, fabs +from libc.stdlib cimport free, malloc + +from yt.utilities.lib.fp_utils cimport fclip, i64clip + from yt.utilities.exceptions import YTDomainOverflow -from yt.utilities.lib.vec3_ops cimport subtract, cross, dot, L2_norm +from yt.utilities.lib.vec3_ops cimport L2_norm, cross, dot, subtract DEF ORDER_MAX=20 DEF INDEX_MAX_64=2097151 diff --git a/yt/utilities/lib/grid_traversal.pyx b/yt/utilities/lib/grid_traversal.pyx index c80678ce0a2..b17aded61fe 100644 --- a/yt/utilities/lib/grid_traversal.pyx +++ b/yt/utilities/lib/grid_traversal.pyx @@ -10,17 +10,34 @@ Simple integrators for the radiative transfer equation import numpy as np -cimport numpy as np + cimport cython -#cimport healpix_interface -from libc.stdlib cimport malloc, calloc, free, abs -from libc.math cimport exp, floor, log2, \ - fabs, atan, atan2, asin, cos, sin, sqrt, acos, M_PI, sqrt -from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip -from field_interpolation_tables cimport \ - FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\ - FIT_eval_transfer_with_light +cimport numpy as np +from field_interpolation_tables cimport ( + FieldInterpolationTable, + FIT_eval_transfer, + FIT_eval_transfer_with_light, + FIT_initialize_table, +) from fixed_interpolator cimport * +from libc.math cimport ( + M_PI, + acos, + asin, + atan, + atan2, + cos, + exp, + fabs, + floor, + log2, + sin, + sqrt, +) +#cimport healpix_interface +from libc.stdlib cimport abs, calloc, free, malloc + +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, i64clip, iclip, imax, imin DEF Nch = 4 diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 28bbb19e62f..978445b0751 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -12,34 +12,55 @@ Image sampler definitions import numpy as np -cimport numpy as np + cimport cython -from libc.stdlib cimport malloc, calloc, free, abs -from libc.math cimport exp, floor, log2, \ - fabs, atan, atan2, asin, cos, sin, sqrt, acos, M_PI -from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip -from field_interpolation_tables cimport \ - FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\ - FIT_eval_transfer_with_light cimport lenses +cimport numpy as np +from field_interpolation_tables cimport ( + FieldInterpolationTable, + FIT_eval_transfer, + FIT_eval_transfer_with_light, + FIT_initialize_table, +) +from libc.math cimport ( + M_PI, + acos, + asin, + atan, + atan2, + cos, + exp, + fabs, + floor, + log2, + 
sin, + sqrt, +) +from libc.stdlib cimport abs, calloc, free, malloc + +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, i64clip, iclip, imax, imin + +from .fixed_interpolator cimport ( + eval_gradient, + fast_interpolate, + offset_fill, + offset_interpolate, + trilinear_interpolate, + vertex_interp, +) from .grid_traversal cimport walk_volume -from .fixed_interpolator cimport \ - offset_interpolate, \ - fast_interpolate, \ - trilinear_interpolate, \ - eval_gradient, \ - offset_fill, \ - vertex_interp + cdef extern from "platform_dep.h": long int lrint(double x) nogil DEF Nch = 4 -from cython.parallel import prange, parallel, threadid -from vec3_ops cimport dot, subtract, L2_norm, fma +from cython.parallel import parallel, prange, threadid from cpython.exc cimport PyErr_CheckSignals +from vec3_ops cimport L2_norm, dot, fma, subtract + cdef struct VolumeRenderAccumulator: int n_fits diff --git a/yt/utilities/lib/image_utilities.pyx b/yt/utilities/lib/image_utilities.pyx index 75687df4c1e..013dae99185 100644 --- a/yt/utilities/lib/image_utilities.pyx +++ b/yt/utilities/lib/image_utilities.pyx @@ -6,10 +6,13 @@ Utilities for images import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np + from yt.utilities.lib.fp_utils cimport iclip + def add_points_to_greyscale_image( np.ndarray[np.float64_t, ndim=2] buffer, np.ndarray[np.int_t, ndim=2] buffer_mask, diff --git a/yt/utilities/lib/interpolators.pyx b/yt/utilities/lib/interpolators.pyx index 42c0e445697..5ff6deb6f71 100644 --- a/yt/utilities/lib/interpolators.pyx +++ b/yt/utilities/lib/interpolators.pyx @@ -9,9 +9,12 @@ Simple interpolators import numpy as np -cimport numpy as np + cimport cython -from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip +cimport numpy as np + +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, iclip, imax, imin + @cython.cdivision(True) @cython.wraparound(False) diff --git a/yt/utilities/lib/lenses.pyx b/yt/utilities/lib/lenses.pyx index f435143747d..f016527f5c4 100644 --- a/yt/utilities/lib/lenses.pyx +++ b/yt/utilities/lib/lenses.pyx @@ -9,10 +9,13 @@ Functions for computing the extent of lenses and whatnot import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np + from .image_samplers cimport ImageSampler + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) diff --git a/yt/utilities/lib/line_integral_convolution.pyx b/yt/utilities/lib/line_integral_convolution.pyx index 09d68abb82c..a68af52d783 100644 --- a/yt/utilities/lib/line_integral_convolution.pyx +++ b/yt/utilities/lib/line_integral_convolution.pyx @@ -8,8 +8,10 @@ Utilities for line integral convolution annotation import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np + @cython.cdivision(True) cdef void _advance_2d(double vx, double vy, diff --git a/yt/utilities/lib/marching_cubes.pyx b/yt/utilities/lib/marching_cubes.pyx index a23db6d3561..1dd8d028ef9 100644 --- a/yt/utilities/lib/marching_cubes.pyx +++ b/yt/utilities/lib/marching_cubes.pyx @@ -9,20 +9,25 @@ Marching cubes implementation """ -cimport numpy as np cimport cython +cimport numpy as np + import numpy as np -from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip -from libc.stdlib cimport malloc, free, abs + +from fixed_interpolator cimport ( + eval_gradient, + offset_fill, + offset_interpolate, + vertex_interp, +) from libc.math cimport sqrt -from fixed_interpolator cimport \ - eval_gradient, \ - offset_fill, \ - offset_interpolate, \ - 
vertex_interp +from libc.stdlib cimport abs, free, malloc + +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, iclip, imax, imin from yt.units.yt_array import YTArray + cdef extern from "marching_cubes.h": int tri_table[256][16] int edge_table[256] diff --git a/yt/utilities/lib/mesh_triangulation.pyx b/yt/utilities/lib/mesh_triangulation.pyx index 880cde4097d..0211534eb20 100644 --- a/yt/utilities/lib/mesh_triangulation.pyx +++ b/yt/utilities/lib/mesh_triangulation.pyx @@ -14,12 +14,14 @@ renderer, as well as when annotating mesh lines on regular slices. """ import numpy as np -cimport numpy as np + cimport cython -from libc.stdlib cimport malloc, free +cimport numpy as np +from libc.stdlib cimport free, malloc from yt.utilities.exceptions import YTElementTypeNotRecognized + cdef extern from "mesh_triangulation.h": enum: MAX_NUM_TRI diff --git a/yt/utilities/lib/mesh_utilities.pyx b/yt/utilities/lib/mesh_utilities.pyx index 4fb3763f800..4fd635937d4 100644 --- a/yt/utilities/lib/mesh_utilities.pyx +++ b/yt/utilities/lib/mesh_utilities.pyx @@ -9,12 +9,16 @@ Utilities for unstructured and semi-structured meshes import numpy as np -cimport numpy as np + cimport cython -from libc.stdlib cimport malloc, free, abs -from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip, i64clip +cimport numpy as np +from libc.stdlib cimport abs, free, malloc + +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, i64clip, iclip, imax, imin + from yt.units.yt_array import YTArray + cdef extern from "platform_dep.h": double rint(double x) diff --git a/yt/utilities/lib/misc_utilities.pyx b/yt/utilities/lib/misc_utilities.pyx index 52af0e6522d..510b2a01cec 100644 --- a/yt/utilities/lib/misc_utilities.pyx +++ b/yt/utilities/lib/misc_utilities.pyx @@ -9,22 +9,22 @@ Simple utilities that don't fit anywhere else """ -from yt.funcs import get_pbar import numpy as np + +from yt.funcs import get_pbar from yt.units.yt_array import YTArray -cimport numpy as np + cimport cython cimport libc.math as math +cimport numpy as np +from cpython cimport buffer +from cython.view cimport array as cvarray, memoryview from libc.math cimport abs, sqrt -from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max -from yt.geometry.selection_routines cimport _ensure_code - -from libc.stdlib cimport malloc, free +from libc.stdlib cimport free, malloc from libc.string cimport strcmp -from cython.view cimport memoryview -from cython.view cimport array as cvarray -from cpython cimport buffer +from yt.geometry.selection_routines cimport _ensure_code +from yt.utilities.lib.fp_utils cimport fmax, fmin, i64max, i64min cdef extern from "platform_dep.h": @@ -32,8 +32,10 @@ cdef extern from "platform_dep.h": void *alloca(int) from cython.parallel import prange + from cpython.exc cimport PyErr_CheckSignals + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) diff --git a/yt/utilities/lib/origami.pyx b/yt/utilities/lib/origami.pyx index 3631064e354..e84f205b6f1 100644 --- a/yt/utilities/lib/origami.pyx +++ b/yt/utilities/lib/origami.pyx @@ -10,8 +10,10 @@ This calls the ORIGAMI routines import numpy as np + cimport numpy as np -from libc.stdlib cimport malloc, free +from libc.stdlib cimport free, malloc + cdef extern from "origami_tags.h": int compute_tags(int ng, double boxsize, double **r, int npart, diff --git a/yt/utilities/lib/particle_kdtree_tools.pyx b/yt/utilities/lib/particle_kdtree_tools.pyx index 17d41765e68..8aefaca21d2 100644 --- a/yt/utilities/lib/particle_kdtree_tools.pyx +++ 
b/yt/utilities/lib/particle_kdtree_tools.pyx @@ -8,31 +8,20 @@ Cython tools for working with the PyKDTree particle KDTree. import numpy as np -cimport numpy as np cimport cython - +cimport numpy as np from cpython.exc cimport PyErr_CheckSignals -from yt.utilities.lib.cykdtree.kdtree cimport ( - PyKDTree, - KDTree, - Node, - uint64_t, - uint32_t, -) - from libc.math cimport sqrt from libcpp.vector cimport vector +from yt.utilities.lib.cykdtree.kdtree cimport KDTree, Node, PyKDTree, uint32_t, uint64_t + from yt.funcs import get_pbar -from yt.geometry.particle_deposit cimport ( - get_kernel_func, - kernel_func, -) -from yt.utilities.lib.bounded_priority_queue cimport ( - BoundedPriorityQueue, - NeighborList -) + +from yt.geometry.particle_deposit cimport get_kernel_func, kernel_func +from yt.utilities.lib.bounded_priority_queue cimport BoundedPriorityQueue, NeighborList + cdef int CHUNKSIZE = 4096 diff --git a/yt/utilities/lib/particle_mesh_operations.pyx b/yt/utilities/lib/particle_mesh_operations.pyx index 129b8a63e97..dada11c661a 100644 --- a/yt/utilities/lib/particle_mesh_operations.pyx +++ b/yt/utilities/lib/particle_mesh_operations.pyx @@ -8,10 +8,13 @@ Simple integrators for the radiative transfer equation """ -cimport numpy as np cimport cython +cimport numpy as np + import numpy as np -from yt.utilities.lib.fp_utils cimport imax, fmax, imin, fmin, iclip, fclip + +from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, iclip, imax, imin + @cython.boundscheck(False) @cython.wraparound(False) diff --git a/yt/utilities/lib/partitioned_grid.pyx b/yt/utilities/lib/partitioned_grid.pyx index 7137fca2784..b943b62e0db 100644 --- a/yt/utilities/lib/partitioned_grid.pyx +++ b/yt/utilities/lib/partitioned_grid.pyx @@ -10,11 +10,14 @@ Image sampler definitions import numpy as np -cimport numpy as np + cimport cython -from libc.stdlib cimport malloc, calloc, free, abs +cimport numpy as np +from libc.stdlib cimport abs, calloc, free, malloc + from .fixed_interpolator cimport offset_interpolate + cdef class PartitionedGrid: @cython.boundscheck(False) diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index fb20e0f89e3..a5e5dd31700 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -13,46 +13,55 @@ Pixelization routines import numpy as np -cimport numpy as np -cimport cython -from cython.view cimport array as cvarray +cimport cython cimport libc.math as math -from yt.utilities.lib.fp_utils cimport fmin, fmax, i64min, i64max, imin, \ - imax, fabs, iclip -from yt.utilities.exceptions import \ - YTPixelizeError, \ - YTElementTypeNotRecognized -from libc.stdlib cimport malloc, free -from vec3_ops cimport dot, cross, subtract -from yt.utilities.lib.element_mappings cimport \ - ElementSampler, \ - P1Sampler1D, \ - P1Sampler2D, \ - P1Sampler3D, \ - Q1Sampler3D, \ - Q1Sampler2D, \ - Q2Sampler2D, \ - S2Sampler3D, \ - W1Sampler3D, \ - T2Sampler2D, \ - Tet2Sampler3D -from yt.geometry.particle_deposit cimport \ - kernel_func, get_kernel_func -from cython.parallel cimport prange +cimport numpy as np +from cython.view cimport array as cvarray + +from yt.utilities.lib.fp_utils cimport ( + fabs, + fmax, + fmin, + i64max, + i64min, + iclip, + imax, + imin, +) + +from yt.utilities.exceptions import YTElementTypeNotRecognized, YTPixelizeError + from cpython.exc cimport PyErr_CheckSignals -from yt.funcs import get_pbar -from yt.utilities.lib.cykdtree.kdtree cimport ( - PyKDTree, - KDTree, - Node, - uint64_t, - 
uint32_t, +from cython.parallel cimport prange +from libc.stdlib cimport free, malloc +from vec3_ops cimport cross, dot, subtract + +from yt.geometry.particle_deposit cimport get_kernel_func, kernel_func +from yt.utilities.lib.element_mappings cimport ( + ElementSampler, + P1Sampler1D, + P1Sampler2D, + P1Sampler3D, + Q1Sampler2D, + Q1Sampler3D, + Q2Sampler2D, + S2Sampler3D, + T2Sampler2D, + Tet2Sampler3D, + W1Sampler3D, ) -from yt.utilities.lib.particle_kdtree_tools cimport find_neighbors, \ - axes_range, \ - set_axes_range + +from yt.funcs import get_pbar + from yt.utilities.lib.bounded_priority_queue cimport BoundedPriorityQueue +from yt.utilities.lib.cykdtree.kdtree cimport KDTree, Node, PyKDTree, uint32_t, uint64_t +from yt.utilities.lib.particle_kdtree_tools cimport ( + axes_range, + find_neighbors, + set_axes_range, +) + cdef int TABLE_NVALS=512 diff --git a/yt/utilities/lib/points_in_volume.pyx b/yt/utilities/lib/points_in_volume.pyx index d4649d27dfc..d50975daf41 100644 --- a/yt/utilities/lib/points_in_volume.pyx +++ b/yt/utilities/lib/points_in_volume.pyx @@ -10,10 +10,12 @@ Checks for points contained in a volume import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np from libc.math cimport sqrt + cdef extern from "math.h": double fabs(double x) diff --git a/yt/utilities/lib/primitives.pyx b/yt/utilities/lib/primitives.pyx index 42b7f8ac85d..11da066010c 100644 --- a/yt/utilities/lib/primitives.pyx +++ b/yt/utilities/lib/primitives.pyx @@ -13,12 +13,15 @@ need to provide three functions: """ cimport cython + import numpy as np -cimport numpy as np + cimport cython.floating +cimport numpy as np from libc.math cimport fabs -from yt.utilities.lib.vec3_ops cimport dot, subtract, cross, distance, L2_norm +from yt.utilities.lib.vec3_ops cimport L2_norm, cross, distance, dot, subtract + cdef np.float64_t DETERMINANT_EPS = 1.0e-10 cdef np.float64_t INF = np.inf diff --git a/yt/utilities/lib/quad_tree.pyx b/yt/utilities/lib/quad_tree.pyx index d00b3fdb28d..88b71cdd5b7 100644 --- a/yt/utilities/lib/quad_tree.pyx +++ b/yt/utilities/lib/quad_tree.pyx @@ -10,15 +10,17 @@ A refine-by-two AMR-specific quadtree import numpy as np -cimport numpy as np -cimport cython -from libc.stdlib cimport malloc, free, abs +cimport cython +cimport numpy as np from cython.operator cimport dereference as deref, preincrement as inc +from libc.stdlib cimport abs, free, malloc + from yt.utilities.lib.fp_utils cimport fmax from yt.utilities.exceptions import YTIntDomainOverflow + cdef extern from "platform_dep.h": # NOTE that size_t might not be int void *alloca(int) diff --git a/yt/utilities/lib/ragged_arrays.pyx b/yt/utilities/lib/ragged_arrays.pyx index 17541d55a68..1f27bc11007 100644 --- a/yt/utilities/lib/ragged_arrays.pyx +++ b/yt/utilities/lib/ragged_arrays.pyx @@ -7,8 +7,10 @@ Some simple operations for operating on ragged arrays import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np + cdef fused numpy_dt: np.float32_t diff --git a/yt/utilities/lib/tests/test_geometry_utils.py b/yt/utilities/lib/tests/test_geometry_utils.py index 49df7d64022..24467d7383c 100644 --- a/yt/utilities/lib/tests/test_geometry_utils.py +++ b/yt/utilities/lib/tests/test_geometry_utils.py @@ -47,7 +47,7 @@ def test_compact_bits(): def test_spread_and_compact_bits(): - from yt.utilities.lib.geometry_utils import spread_bits, compact_bits + from yt.utilities.lib.geometry_utils import compact_bits, spread_bits li = [np.uint64(0b111111111111111111111)] for ans in li: @@ -648,7 +648,7 @@ def 
test_get_morton_neighbors_refined(): def test_morton_neighbor(): - from yt.utilities.lib.geometry_utils import morton_neighbor, get_morton_indices + from yt.utilities.lib.geometry_utils import get_morton_indices, morton_neighbor order = 20 imax = np.uint64(1 << order) @@ -695,7 +695,7 @@ def test_morton_neighbor(): def test_get_morton_neighbors(): - from yt.utilities.lib.geometry_utils import get_morton_neighbors, get_morton_indices + from yt.utilities.lib.geometry_utils import get_morton_indices, get_morton_neighbors order = 20 imax = 1 << order diff --git a/yt/utilities/lib/write_array.pyx b/yt/utilities/lib/write_array.pyx index 291bbbbbb72..673e082d26d 100644 --- a/yt/utilities/lib/write_array.pyx +++ b/yt/utilities/lib/write_array.pyx @@ -7,8 +7,9 @@ Faster, cythonized file IO import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np DTYPE = np.float64 ctypedef np.float64_t DTYPE_t diff --git a/yt/utilities/lodgeit.py b/yt/utilities/lodgeit.py index 3d5d8e4e20a..7649781eef6 100644 --- a/yt/utilities/lodgeit.py +++ b/yt/utilities/lodgeit.py @@ -132,7 +132,7 @@ def copy_url(url): # then give pbcopy a try. do that before gtk because # gtk might be installed on os x but nobody is interested # in the X11 clipboard there. - from subprocess import Popen, PIPE + from subprocess import PIPE, Popen try: client = Popen(["pbcopy"], stdin=PIPE) @@ -141,8 +141,8 @@ def copy_url(url): import pygtk pygtk.require("2.0") - import gtk import gobject + import gtk except ImportError: return gtk.clipboard_get(gtk.gdk.SELECTION_CLIPBOARD).set_text(url) diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index c0d8943fc11..2d08bb73b54 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -17,8 +17,8 @@ from yt.utilities.on_demand_imports import _h5py as h5 if sys.version_info < (3, 0): - from .poster.streaminghttp import register_openers from .poster.encode import multipart_encode + from .poster.streaminghttp import register_openers register_openers() else: diff --git a/yt/utilities/poster/encode.py b/yt/utilities/poster/encode.py index be9d491e3dd..9049bde0257 100644 --- a/yt/utilities/poster/encode.py +++ b/yt/utilities/poster/encode.py @@ -25,7 +25,9 @@ def gen_boundary(): except ImportError: - import random, sha + import random + + import sha def gen_boundary(): """Returns a random string to use as the boundary for a message""" diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index d60c1544827..521ba98c96f 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -8,8 +8,8 @@ def get_thingking_deps(): try: - from thingking.httpmmap import HTTPArray from thingking.arbitrary_page import PageCacheURL + from thingking.httpmmap import HTTPArray except ImportError: raise ImportError( "This functionality requires the thingking package to be installed" diff --git a/yt/utilities/voropp.pyx b/yt/utilities/voropp.pyx index 13625774a4a..97452966173 100644 --- a/yt/utilities/voropp.pyx +++ b/yt/utilities/voropp.pyx @@ -6,13 +6,15 @@ Wrapping code for voro++ """ -from cython.operator cimport dereference as deref, preincrement as inc -from libc.stdlib cimport malloc, free, abs, calloc, labs cimport libcpp +from cython.operator cimport dereference as deref, preincrement as inc +from libc.stdlib cimport abs, calloc, free, labs, malloc import numpy as np -cimport numpy as np + cimport cython +cimport numpy as np + cdef extern from "voro++.hh" namespace "voro": cdef cppclass c_loop_all diff --git 
a/yt/visualization/color_maps.py b/yt/visualization/color_maps.py index 6ffafbcaba6..9748d307890 100644 --- a/yt/visualization/color_maps.py +++ b/yt/visualization/color_maps.py @@ -1,6 +1,5 @@ -import matplotlib.cm as mcm -import matplotlib.colors as cc import numpy as np +from matplotlib import cm as mcm, colors as cc from . import _colormap_data as _cm @@ -566,8 +565,7 @@ def show_colormaps(subset="all", filename=None): If filename is set, then it will save the colormaps to an output file. If it is not set, it will "show" the result interactively. """ - import matplotlib.pyplot as plt - import matplotlib.cm as cm + from matplotlib import cm as cm, pyplot as plt a = np.outer(np.arange(0, 1, 0.01), np.ones(10)) if subset == "all": diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index e26adc887af..22d0b89a589 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -1,8 +1,7 @@ -import matplotlib.pyplot as plt import numpy as np -from matplotlib import cm - import pyx +from matplotlib import cm, pyplot as plt + from yt.config import ytcfg from yt.funcs import issue_deprecation_warning from yt.units.unit_object import Unit diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index 464cbfd6b9e..31aa539762e 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -633,7 +633,7 @@ def to_glue(self, label="yt", data_collection=None): add a *label*. If you are already within the Glue environment, you can pass a *data_collection* object, otherwise Glue will be started. """ - from glue.core import DataCollection, Data + from glue.core import Data, DataCollection from glue.core.coordinates import coordinates_from_header try: diff --git a/yt/visualization/image_writer.py b/yt/visualization/image_writer.py index 94d38faeea7..4975d521761 100644 --- a/yt/visualization/image_writer.py +++ b/yt/visualization/image_writer.py @@ -2,12 +2,12 @@ import numpy as np -import yt.utilities.lib.image_utilities as au -import yt.utilities.png_writer as pw from yt.config import ytcfg from yt.funcs import get_brewer_cmap, get_image_suffix, mylog from yt.units.yt_array import YTQuantity +from yt.utilities import png_writer as pw from yt.utilities.exceptions import YTNotInsideNotebook +from yt.utilities.lib import image_utilities as au from . import _colormap_data as cmd from .color_maps import mcm @@ -298,6 +298,7 @@ def strip_colormap_data( ), ): import pprint + from . import color_maps as rcm f = open(fn, "w") @@ -392,8 +393,9 @@ def write_projection( """ if cmap_name is None: cmap_name = ytcfg.get("yt", "default_colormap") - import matplotlib.figure import matplotlib.colors + import matplotlib.figure + from ._mpl_imports import FigureCanvasAgg, FigureCanvasPdf, FigureCanvasPS # If this is rendered as log, then apply now. 
diff --git a/yt/visualization/mapserver/pannable_map.py b/yt/visualization/mapserver/pannable_map.py index f4c3daede47..a5a2db0d601 100644 --- a/yt/visualization/mapserver/pannable_map.py +++ b/yt/visualization/mapserver/pannable_map.py @@ -1,9 +1,9 @@ import os from functools import wraps +import bottle import numpy as np -import bottle from yt.utilities.lib.misc_utilities import get_color_bounds from yt.utilities.png_writer import write_png_to_string from yt.visualization.fixed_resolution import FixedResolutionBuffer diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index 42825364d63..8cccaac83f1 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -616,7 +616,7 @@ def __init__( self.data_source = data_source def __call__(self, plot): - from matplotlib.tri import Triangulation, LinearTriInterpolator + from matplotlib.tri import LinearTriInterpolator, Triangulation # These need to be in code_length x0, x1, y0, y1 = self._physical_bounds(plot) diff --git a/yt/visualization/volume_rendering/input_events.py b/yt/visualization/volume_rendering/input_events.py index d9cfd6f4801..7a733488f9b 100644 --- a/yt/visualization/volume_rendering/input_events.py +++ b/yt/visualization/volume_rendering/input_events.py @@ -1,13 +1,12 @@ # This is a part of the experimental Interactive Data Visualization - import random from collections import defaultdict, namedtuple -import matplotlib.cm as cm +import cyglfw3 as glfw import numpy as np +from matplotlib import cm as cm +from OpenGL import GL as GL -import cyglfw3 as glfw -import OpenGL.GL as GL from yt.utilities.math_utils import get_orthographic_matrix, get_perspective_matrix event_registry = {} diff --git a/yt/visualization/volume_rendering/interactive_loop.py b/yt/visualization/volume_rendering/interactive_loop.py index 1166547a7b3..cf590caea54 100644 --- a/yt/visualization/volume_rendering/interactive_loop.py +++ b/yt/visualization/volume_rendering/interactive_loop.py @@ -1,11 +1,10 @@ # This is a part of the experimental Interactive Data Visualization - import os +import cyglfw3 as glfw import numpy as np +from OpenGL import GL as GL -import cyglfw3 as glfw -import OpenGL.GL as GL from yt import write_bitmap from .input_events import EventCollection, MouseRotation diff --git a/yt/visualization/volume_rendering/interactive_vr.py b/yt/visualization/volume_rendering/interactive_vr.py index 78188276eee..6dbd3699715 100644 --- a/yt/visualization/volume_rendering/interactive_vr.py +++ b/yt/visualization/volume_rendering/interactive_vr.py @@ -1,12 +1,11 @@ # This is a part of the experimental Interactive Data Visualization - import ctypes from collections import OrderedDict -import matplotlib.cm as cm import numpy as np +from matplotlib import cm as cm +from OpenGL import GL as GL -import OpenGL.GL as GL from yt.config import ytcfg from yt.utilities.lib.mesh_triangulation import triangulate_mesh from yt.utilities.math_utils import ( diff --git a/yt/visualization/volume_rendering/interactive_vr_helpers.py b/yt/visualization/volume_rendering/interactive_vr_helpers.py index 164d8f60f25..9ee4d8591bf 100644 --- a/yt/visualization/volume_rendering/interactive_vr_helpers.py +++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py @@ -50,13 +50,13 @@ def _render_opengl( "packages to be installed." 
) + from .interactive_loop import RenderingContext from .interactive_vr import ( - SceneGraph, BlockCollection, - TrackballCamera, MeshSceneComponent, + SceneGraph, + TrackballCamera, ) - from .interactive_loop import RenderingContext if isinstance(data_source, Dataset): dobj = data_source.all_data() diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index f47bd39ee64..55f31cd172c 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1189,7 +1189,7 @@ class InteractiveCamera(Camera): frames = [] def snapshot(self, fn=None, clip_ratio=None): - import matplotlib.pylab as pylab + from matplotlib import pylab as pylab pylab.figure(2) self.transfer_function.show() @@ -2027,8 +2027,8 @@ def plot_allsky_healpix( cmin=None, cmax=None, ): - import matplotlib.figure import matplotlib.backends.backend_agg + import matplotlib.figure if rotation is None: rotation = np.eye(3).astype("float64") diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index e1cfeb11b22..7b8df0e4f94 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -295,9 +295,9 @@ def save(self, fname=None, sigma_clip=None): if suffix == ".png": self._last_render.write_png(fname, sigma_clip=sigma_clip) else: - from matplotlib.figure import Figure from matplotlib.backends.backend_pdf import FigureCanvasPdf from matplotlib.backends.backend_ps import FigureCanvasPS + from matplotlib.figure import Figure shape = self._last_render.shape fig = Figure((shape[0] / 100.0, shape[1] / 100.0)) diff --git a/yt/visualization/volume_rendering/shader_objects.py b/yt/visualization/volume_rendering/shader_objects.py index 0e98bbcf59e..eba6c4a4adb 100644 --- a/yt/visualization/volume_rendering/shader_objects.py +++ b/yt/visualization/volume_rendering/shader_objects.py @@ -1,10 +1,10 @@ # This is a part of the experimental Interactive Data Visualization - import contextlib import os from collections import OrderedDict -import OpenGL.GL as GL +from OpenGL import GL as GL + from yt.units.yt_array import YTQuantity from yt.utilities.exceptions import ( YTInvalidShaderType, diff --git a/yt/visualization/volume_rendering/transfer_function_helper.py b/yt/visualization/volume_rendering/transfer_function_helper.py index db69e042f93..9fed8b76eb2 100644 --- a/yt/visualization/volume_rendering/transfer_function_helper.py +++ b/yt/visualization/volume_rendering/transfer_function_helper.py @@ -154,9 +154,10 @@ def plot(self, fn=None, profile_field=None, profile_weight=None): If fn is None, will return an image to an IPython notebook. 
""" - from yt.visualization._mpl_imports import FigureCanvasAgg from matplotlib.figure import Figure + from yt.visualization._mpl_imports import FigureCanvasAgg + if self.tf is None: self.build_transfer_function() self.setup_default() From 9180540cafb84c0b85a2b0225959271bb9d885ef Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 21 Jul 2020 10:30:22 +0100 Subject: [PATCH 092/653] Use cmap instead of map --- yt/geometry/particle_oct_container.pyx | 8 ++++---- yt/utilities/lib/ewah_bool_array.pxd | 4 ++-- yt/utilities/lib/ewah_bool_wrap.pyx | 10 +++++----- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/yt/geometry/particle_oct_container.pyx b/yt/geometry/particle_oct_container.pyx index 373c323f238..548e568f361 100644 --- a/yt/geometry/particle_oct_container.pyx +++ b/yt/geometry/particle_oct_container.pyx @@ -13,7 +13,7 @@ Oct container tuned for Particles from libc.math cimport ceil, floor, fmod from libc.stdlib cimport free, malloc, qsort from libc.string cimport memset -from libcpp.map cimport map +from libcpp.map cimport map as cmap from libcpp.vector cimport vector from yt.utilities.lib.ewah_bool_array cimport ( @@ -84,7 +84,7 @@ from ..utilities.lib.ewah_bool_wrap cimport ( SparseUnorderedRefinedBitmaskSet as SparseUnorderedRefinedBitmask, ) -ctypedef map[np.uint64_t, bool_array] CoarseRefinedSets +ctypedef cmap[np.uint64_t, bool_array] CoarseRefinedSets cdef class ParticleOctreeContainer(OctreeContainer): cdef Oct** oct_list @@ -691,7 +691,7 @@ cdef class ParticleBitmap: cdef int axiter[3][2] cdef np.float64_t axiterv[3][2] cdef CoarseRefinedSets coarse_refined_map - cdef map[np.uint64_t, np.uint64_t] refined_count + cdef cmap[np.uint64_t, np.uint64_t] refined_count cdef np.uint64_t nfully_enclosed = 0, n_calls = 0 mi1_max = (1 << self.index_order1) - 1 mi2_max = (1 << self.index_order2) - 1 @@ -1103,7 +1103,7 @@ cdef class ParticleBitmap: arr_two.reset() for ifile in range(nbitmasks): if self.bitmasks._isref(ifile, mi1) == 1: - arr = ( self.bitmasks.ewah_coll)[ifile][0][mi1] + arr = ( self.bitmasks.ewah_coll)[ifile][0][mi1] arr_any.logicaland(arr, arr_two) # Indices in previous files arr_any.logicalor(arr, arr_swap) # All second level indices arr_any = arr_swap diff --git a/yt/utilities/lib/ewah_bool_array.pxd b/yt/utilities/lib/ewah_bool_array.pxd index b8507b71f1f..32b7b881b2c 100644 --- a/yt/utilities/lib/ewah_bool_array.pxd +++ b/yt/utilities/lib/ewah_bool_array.pxd @@ -7,7 +7,7 @@ Wrapper for EWAH Bool Array: https://github.com/lemire/EWAHBoolArray from libcpp.vector cimport vector -from libcpp.map cimport map +from libcpp.map cimport map as cmap from libcpp.string cimport string from libcpp cimport bool from libc.stdint cimport uint64_t, uint32_t @@ -97,6 +97,6 @@ ELSE: ctypedef EWAHBoolArray[ewah_word_type] ewah_bool_array ctypedef EWAHBoolArraySetBitForwardIterator[ewah_word_type] ewah_bool_iterator ctypedef vector[size_t] bitset_array -ctypedef map[np.uint64_t, ewah_bool_array] ewah_map +ctypedef cmap[np.uint64_t, ewah_bool_array] ewah_map ctypedef stringstream sstream ctypedef BoolArray[ewah_word_type] bool_array diff --git a/yt/utilities/lib/ewah_bool_wrap.pyx b/yt/utilities/lib/ewah_bool_wrap.pyx index 8564d173a53..13ae0d96a90 100644 --- a/yt/utilities/lib/ewah_bool_wrap.pyx +++ b/yt/utilities/lib/ewah_bool_wrap.pyx @@ -157,17 +157,17 @@ cdef class FileBitmasks: cdef np.int32_t ifile cdef ewah_bool_array iarr, arr_two, arr_swap cdef ewah_bool_array* coll_refn - cdef map[np.uint64_t, ewah_bool_array] map_keys, map_refn - cdef map[np.uint64_t, 
ewah_bool_array]* coll_coll - cdef map[np.uint64_t, ewah_bool_array]* map_bitmask + cdef cmap[np.uint64_t, ewah_bool_array] map_keys, map_refn + cdef cmap[np.uint64_t, ewah_bool_array]* coll_coll + cdef cmap[np.uint64_t, ewah_bool_array]* map_bitmask coll_refn = coll.ewah_refn if coll_refn[0].numberOfOnes() == 0: if verbose == 1: print("{: 10d}/{: 10d} collisions at refined refinement. ({: 10.5f}%)".format(0,0,0)) return (0,0) - coll_coll = coll.ewah_coll + coll_coll = coll.ewah_coll for ifile in range(self.nfiles): - map_bitmask = ( self.ewah_coll)[ifile] + map_bitmask = ( self.ewah_coll)[ifile] for it_mi1 in map_bitmask[0]: mi1 = it_mi1.first iarr = it_mi1.second From 6debe4edb81ea387209ed85e77a3398e4c9bad95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 21 Jul 2020 18:05:09 +0200 Subject: [PATCH 093/653] update call to isort on travis and in pull request template --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- .travis.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index e0040b5fd1f..f8f443bf401 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -21,7 +21,7 @@ detail. Why is this change required? What problem does it solve?--> - [ ] pass `flake8 yt/` -- [ ] pass `isort -rc . --check-only` +- [ ] pass `isort . --check --diff` - [ ] pass `black --check yt/` - [ ] New features are documented, with docstrings and narrative docs - [ ] Adds a test for any bugs fixed. Adds tests for new features. diff --git a/.travis.yml b/.travis.yml index a347cbee058..9c3b7d7f570 100644 --- a/.travis.yml +++ b/.travis.yml @@ -86,7 +86,7 @@ jobs: - stage: Lint name: "isort" python: 3.6 - script: isort --check-only -rc yt/ + script: isort . 
--check --diff - stage: Lint name: "black" From 437e2571dce0be2490420d6016117a2c4b22b063 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 21 Jul 2020 19:04:06 +0200 Subject: [PATCH 094/653] add new isort pass commit to ignored list for gitblame --- .git-blame-ignore-revs | 1 + 1 file changed, 1 insertion(+) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index f5a1bce97b2..a88bbbf6bb1 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -1,5 +1,6 @@ # transition to isort 7edfcee093cca277307aabdb180e0ffc69768291 +81418e459f16c48d6b7a75d6ef8035dfe9651b39 # transisiton to black ebadee629414aed2c7b6526e22a419205329ec38 From 022431292a2d7ee0541ed790be7c725c1012c745 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 17:01:25 +0200 Subject: [PATCH 095/653] fix: bare except statements --- yt/frontends/arepo/data_structures.py | 3 +-- yt/utilities/lib/cykdtree/tests/test_kdtree.py | 4 ++-- 2 files changed, 3 insertions(+), 4 deletions(-) diff --git a/yt/frontends/arepo/data_structures.py b/yt/frontends/arepo/data_structures.py index 1dc5b6abfe1..649d6057444 100644 --- a/yt/frontends/arepo/data_structures.py +++ b/yt/frontends/arepo/data_structures.py @@ -56,9 +56,8 @@ def _is_valid(self, *args, **kwargs): ) ) fh.close() - except: + except Exception: valid = False - pass return valid def _get_uvals(self): diff --git a/yt/utilities/lib/cykdtree/tests/test_kdtree.py b/yt/utilities/lib/cykdtree/tests/test_kdtree.py index bad279a8d5b..c66a6e70a16 100644 --- a/yt/utilities/lib/cykdtree/tests/test_kdtree.py +++ b/yt/utilities/lib/cykdtree/tests/test_kdtree.py @@ -73,11 +73,11 @@ def test_neighbors(periodic=False): assert len(right_neighbors[d][leaf.id]) == len(leaf.right_neighbors[d]) for i in range(len(leaf.right_neighbors[d])): assert right_neighbors[d][leaf.id][i] == leaf.right_neighbors[d][i] - except: + except Exception as e: for leaf in tree.leaves: print(leaf.id, leaf.left_edge, leaf.right_edge) print(out_str) - raise + raise e @parametrize(npts=100, ndim=(2, 3), periodic=(False, True)) From 4be4935e681e1ef5b09b63f21ae488494119caa3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 14:35:41 +0200 Subject: [PATCH 096/653] ignore b008 errors (intended behaviour in tests) --- yt/testing.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/testing.py b/yt/testing.py index beac6e2c9eb..135deef8ea5 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -574,7 +574,7 @@ def fake_sph_grid_ds(hsml_factor=1.0): return load_particles(data=data, length_unit=1.0, bbox=bbox) -def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None): +def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None): # noqa B008 # Implementation taken from url: # http://docs.hyperion-rt.org/en/stable/advanced/indepth_oct.html @@ -600,7 +600,7 @@ def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None): def fake_octree_ds( - prng=RandomState(0x1D3D3D3), + prng=RandomState(0x1D3D3D3), # noqa B008 refined=None, quantities=None, bbox=None, From c85d1538951e0468c2000ec9bb2933ef012b2095 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 21:49:47 +0200 Subject: [PATCH 097/653] a better random state seed Co-authored-by: Matthew Turk --- yt/testing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/testing.py b/yt/testing.py index 135deef8ea5..394068462a5 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -600,7 
+600,7 @@ def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None): # noqa B0 def fake_octree_ds( - prng=RandomState(0x1D3D3D3), # noqa B008 + prng=RandomState(0x4D3D3D3), # noqa B008 refined=None, quantities=None, bbox=None, From 3aae19ac3ef9b49e309328114b75a9deaaa7c70e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 14:21:31 +0200 Subject: [PATCH 098/653] fix: fix flake8-bugbear B009 (Do not call getattr with a constant attribute value) errors --- yt/data_objects/static_output.py | 4 +--- yt/frontends/ytdata/data_structures.py | 4 +--- yt/testing.py | 4 ++-- yt/utilities/command_line.py | 2 +- 4 files changed, 5 insertions(+), 9 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 57cbddcaf41..9e71c538c11 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -532,9 +532,7 @@ def print_key_parameters(self): continue v = getattr(self, a) mylog.info("Parameters: %-25s = %s", a, v) - if hasattr(self, "cosmological_simulation") and getattr( - self, "cosmological_simulation" - ): + if hasattr(self, "cosmological_simulation") and self.cosmological_simulation: for a in [ "current_redshift", "omega_lambda", diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index 8e6eb8a1434..5a6c41f6e36 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -752,9 +752,7 @@ def print_key_parameters(self): v = getattr(self, a) if v is not None: mylog.info("Parameters: %-25s = %s", a, v) - if hasattr(self, "cosmological_simulation") and getattr( - self, "cosmological_simulation" - ): + if hasattr(self, "cosmological_simulation") and self.cosmological_simulation: for a in [ "current_redshift", "omega_lambda", diff --git a/yt/testing.py b/yt/testing.py index beac6e2c9eb..c14be0ccd15 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -41,8 +41,8 @@ def nop(): _t = _Dummy("nop") -assert_true = getattr(_t, "assertTrue") -assert_less_equal = getattr(_t, "assertLessEqual") +assert_true = getattr(_t, "assertTrue") # noqa: B009 +assert_less_equal = getattr(_t, "assertLessEqual") # noqa: B009 def assert_rel_equal(a1, a2, decimals, err_msg="", verbose=True): diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 23770409657..27fcb2e8079 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -1847,7 +1847,7 @@ def run_main(): # http://bugs.python.org/issue16308 # http://bugs.python.org/issue9253 try: - getattr(args, "func") + args.func except AttributeError: parser.print_help() sys.exit(0) From a82ddece6ed96dbc29d7c13861da6ae70b585e0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 17:09:45 +0200 Subject: [PATCH 099/653] fix: B010 errors (Do not call setattr with a constant attribute value) --- yt/data_objects/profiles.py | 4 ++-- yt/data_objects/static_output.py | 6 +----- 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index 5ae0d208b43..199c0cd7a30 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -1454,8 +1454,8 @@ def create_profile( for o_bin, ax in zip(o_bins, ["x", "y", "z"]): kwargs["override_bins_{0}".format(ax)] = o_bin obj = cls(*args, **kwargs) - setattr(obj, "accumulation", accumulation) - setattr(obj, "fractional", fractional) + obj.accumulation = accumulation + obj.fractional = fractional if fields is not 
None: obj.add_fields([field for field in fields]) for field in fields: diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 57cbddcaf41..a7ec6f187c7 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -116,11 +116,7 @@ def __get__(self, instance, owner): pass if self.display_array: try: - setattr( - ret, - "_ipython_display_", - functools.partial(_wrap_display_ytarray, ret), - ) + ret._ipython_display_ = functools.partial(_wrap_display_ytarray, ret) # This will error out if the items have yet to be turned into # YTArrays, in which case we just let it go. except AttributeError: From d696291f23ed2a2cf3ec4b3de453076daf3bc50d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 17:18:58 +0200 Subject: [PATCH 100/653] fix: one B011 error (avoid assert False) --- yt/visualization/volume_rendering/tests/test_off_axis_SPH.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py index 02d51882c05..8de78d2d119 100644 --- a/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py +++ b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py @@ -306,5 +306,5 @@ def find_compare_maxima(expected_maxima, buf, resolution, width): found_match = True break if found_match is not True: - assert False + raise AssertionError pass From d344f05ef22e1076697974122591e4d815da2c9c Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Wed, 18 Mar 2020 18:42:01 -0500 Subject: [PATCH 101/653] [tests] Mock a method outputting files that we don't even look at --- tests/test_minimal_requirements.txt | 1 + tests/test_requirements.txt | 2 +- yt/data_objects/tests/test_projection.py | 4 +- yt/data_objects/tests/test_slice.py | 4 +- yt/visualization/tests/test_particle_plot.py | 48 ++++++++++++++------ 5 files changed, 42 insertions(+), 17 deletions(-) diff --git a/tests/test_minimal_requirements.txt b/tests/test_minimal_requirements.txt index 0233a235406..2ee7cae08bc 100644 --- a/tests/test_minimal_requirements.txt +++ b/tests/test_minimal_requirements.txt @@ -9,3 +9,4 @@ pyyaml>=4.2b1 coverage==4.5.1 codecov==2.0.15 unyt==2.7.2 +mock diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index c4cb9c1a38d..029df6ef6b8 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -6,7 +6,7 @@ glueviz==0.13.3 h5py==2.10.0 ipython==7.1.1 matplotlib==3.3.0 -mock==2.0.0; python_version < '3.0' +mock nose-timer==1.0.0 nose==1.3.7 pandas==0.25.3 diff --git a/yt/data_objects/tests/test_projection.py b/yt/data_objects/tests/test_projection.py index 770d712b0d1..e11123a417c 100644 --- a/yt/data_objects/tests/test_projection.py +++ b/yt/data_objects/tests/test_projection.py @@ -1,6 +1,7 @@ import os import tempfile +import mock import numpy as np from yt.testing import assert_equal, assert_rel_equal, fake_amr_ds, fake_random_ds @@ -23,7 +24,8 @@ def teardown_func(fns): pass -def test_projection(): +@mock.patch("yt.visualization._mpl_imports.FigureCanvasAgg.print_figure") +def test_projection(pf): fns = [] for nprocs in [8, 1]: # We want to test both 1 proc and 8 procs, to make sure that diff --git a/yt/data_objects/tests/test_slice.py b/yt/data_objects/tests/test_slice.py index 60b058c8b42..4b0711d25ae 100644 --- a/yt/data_objects/tests/test_slice.py +++ b/yt/data_objects/tests/test_slice.py @@ -1,6 +1,7 @@ import os import tempfile +import mock
import numpy as np from yt.testing import assert_equal, fake_random_ds @@ -21,7 +22,8 @@ def teardown_func(fns): pass -def test_slice(): +@mock.patch("yt.visualization._mpl_imports.FigureCanvasAgg.print_figure") +def test_slice(pf): fns = [] grid_eps = np.finfo(np.float64).eps for nprocs in [8, 1]: diff --git a/yt/visualization/tests/test_particle_plot.py b/yt/visualization/tests/test_particle_plot.py index e9a3aed30e1..16f41c40ef4 100644 --- a/yt/visualization/tests/test_particle_plot.py +++ b/yt/visualization/tests/test_particle_plot.py @@ -3,6 +3,7 @@ import tempfile import unittest +import mock import numpy as np from yt.convenience import load @@ -11,7 +12,6 @@ from yt.testing import ( assert_allclose, assert_array_almost_equal, - assert_fname, fake_particle_ds, requires_file, ) @@ -69,11 +69,7 @@ def setup(): YTArray([0.3, 0.4, 0.7], "cm"), ) -WEIGHT_FIELDS = ( - None, - "particle_ones", - ("all", "particle_mass"), -) +WEIGHT_FIELDS = (None, "particle_ones", ("all", "particle_mass")) PHASE_FIELDS = [ ("particle_velocity_x", "particle_position_z", "particle_mass"), @@ -231,9 +227,17 @@ def test_particle_phase_plot(self): particle_phases.append(ParticlePhasePlot.from_profile(pp)) particle_phases[0]._repr_html_() - for p in particle_phases: - for fname in TEST_FLNMS: - assert_fname(p.save(fname)[0]) + + with mock.patch( + "yt.visualization._mpl_imports.FigureCanvasAgg.print_figure" + ), mock.patch( + "yt.visualization._mpl_imports.FigureCanvasPdf.print_figure" + ), mock.patch( + "yt.visualization._mpl_imports.FigureCanvasPS.print_figure" + ): + for p in particle_phases: + for fname in TEST_FLNMS: + p.save(fname) tgal = "TipsyGalaxy/galaxy.00300" @@ -331,8 +335,15 @@ def test_particle_plot(self): test_ds = fake_particle_ds() for dim in range(3): pplot = ParticleProjectionPlot(test_ds, dim, "particle_mass") - for fname in TEST_FLNMS: - assert_fname(pplot.save(fname)[0]) + with mock.patch( + "yt.visualization._mpl_imports.FigureCanvasAgg.print_figure" + ), mock.patch( + "yt.visualization._mpl_imports.FigureCanvasPdf.print_figure" + ), mock.patch( + "yt.visualization._mpl_imports.FigureCanvasPS.print_figure" + ): + for fname in TEST_FLNMS: + pplot.save(fname)[0] def test_particle_plot_ds(self): test_ds = fake_particle_ds() @@ -341,7 +352,10 @@ def test_particle_plot_ds(self): pplot_ds = ParticleProjectionPlot( test_ds, dim, "particle_mass", data_source=ds_region ) - pplot_ds.save() + with mock.patch( + "yt.visualization._mpl_imports.FigureCanvasAgg.print_figure" + ): + pplot_ds.save() def test_particle_plot_c(self): test_ds = fake_particle_ds() @@ -350,7 +364,10 @@ def test_particle_plot_c(self): pplot_c = ParticleProjectionPlot( test_ds, dim, "particle_mass", center=center ) - pplot_c.save() + with mock.patch( + "yt.visualization._mpl_imports.FigureCanvasAgg.print_figure" + ): + pplot_c.save() def test_particle_plot_wf(self): test_ds = fake_particle_ds() @@ -359,7 +376,10 @@ def test_particle_plot_wf(self): pplot_wf = ParticleProjectionPlot( test_ds, dim, "particle_mass", weight_field=weight_field ) - pplot_wf.save() + with mock.patch( + "yt.visualization._mpl_imports.FigureCanvasAgg.print_figure" + ): + pplot_wf.save() def test_creation_with_width(self): test_ds = fake_particle_ds() From b5d3e8e3403731d7c11f74f8e0bc65647da15ddd Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Tue, 21 Jul 2020 15:04:30 -0500 Subject: [PATCH 102/653] Use pykdtree for Cartopy as it is 3 times faster --- appveyor.yml | 5 +++-- tests/test_requirements.txt | 1 + 2 files changed, 4 
insertions(+), 2 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index b4629c30af1..b2aac8f156c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -31,7 +31,8 @@ install: # Install specified version of numpy and dependencies - "conda install --yes -c conda-forge numpy scipy nose pytest setuptools ipython git unyt - Cython sympy fastcache h5py matplotlib=3.1.3 mock pandas cartopy conda-build pooch pyyaml" + Cython sympy fastcache h5py matplotlib=3.1.3 mock pandas cartopy conda-build pooch pyyaml + nose-timer pykdtree" # install yt - "pip install -e ." @@ -39,7 +40,7 @@ install: build: false test_script: - - "nosetests --nologcapture -sv --traverse-namespace yt" + - "nosetests --with-timer --timer-top-n=20 --nologcapture --with-xunit -sv --traverse-namespace yt" # Enable this to be able to login to the build worker. You can use the # `remmina` program in Ubuntu, use the login information that the line below diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index 029df6ef6b8..53ab69d782d 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -28,3 +28,4 @@ firefly_api>=0.0.2 f90nml>=1.1.2 MiniballCpp>=0.2.1 pooch>=0.7.0 +pykdtree==1.3.1 From 295ca585578b1b7d60582d46f6f3fdc39f4578ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 22 Jun 2020 17:34:46 +0200 Subject: [PATCH 103/653] refactor: declare Dataset, Index, GridIndex as abstract classes --- doc/source/examining/low_level_inspection.rst | 7 +-- yt/data_objects/static_output.py | 43 ++++++++++++++----- yt/frontends/_skeleton/data_structures.py | 30 +++++++------ yt/frontends/_skeleton/io.py | 3 ++ yt/frontends/enzo/io.py | 6 --- yt/frontends/gadget_fof/data_structures.py | 10 ++++- yt/geometry/geometry_handler.py | 7 ++- yt/geometry/grid_geometry_handler.py | 15 ++++++- yt/utilities/io_handler.py | 14 +++--- 9 files changed, 91 insertions(+), 44 deletions(-) diff --git a/doc/source/examining/low_level_inspection.rst b/doc/source/examining/low_level_inspection.rst index d08a876f2ec..9b5e8232e82 100644 --- a/doc/source/examining/low_level_inspection.rst +++ b/doc/source/examining/low_level_inspection.rst @@ -88,15 +88,12 @@ normal, you can access the grid as you would a normal object: print(g["density"]) print(g["density"].min()) -To access the raw data, you have to call the IO handler from the index -instead. This is somewhat more low-level. +To access the raw data (as found in the file), use .. code-block:: python g = ds.index.grids[1043] - rho = ds.index.io.pop(g, "density") - -This field will be the raw data found in the file. + rho = g["density"].in_base("code") .. 
_finding-data-at-fixed-points: diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 57cbddcaf41..a4bbc7034b7 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1,3 +1,4 @@ +import abc import functools import itertools import os @@ -73,7 +74,7 @@ def _raise_unsupp(*args, **kwargs): return _raise_unsupp -class RegisteredDataset(type): +class RegisteredDataset(abc.ABCMeta): def __init__(cls, name, b, d): type.__init__(cls, name, b, d) output_type_registry[name] = cls @@ -168,6 +169,16 @@ class Dataset(metaclass=RegisteredDataset): _proj_type = "quad_proj" _ionization_label_format = "roman_numeral" + # these are set in self._parse_parameter_file() + domain_left_edge = MutableAttribute() + domain_right_edge = MutableAttribute() + domain_dimensions = MutableAttribute() + periodicity = MutableAttribute() + + # these are set in self._set_derived_attrs() + domain_width = MutableAttribute() + domain_center = MutableAttribute() + def __new__(cls, filename=None, *args, **kwargs): if not isinstance(filename, str): obj = object.__new__(cls) @@ -273,6 +284,26 @@ def unique_identifier(self): def unique_identifier(self, value): self._unique_identifier = value + # abstract methods require implementation in subclasses + @classmethod + @abc.abstractmethod + def _is_valid(cls, *args, **kwargs): + # A heuristic test to determine if the data format can be interpreted + # with the present frontend + return False + + @abc.abstractmethod + def _parse_parameter_file(self): + # set up various attributes from self.parameter_filename + # see yt.frontends._skeleton.SkeletonDataset for a full description of what is required here + pass + + @abc.abstractmethod + def _set_code_unit_attributes(self): + # set up code-units to physical units normalization factors + # see yt.frontends._skeleton.SkeletonDataset for a full description of what is required here + pass + def _set_derived_attrs(self): if self.domain_left_edge is None or self.domain_right_edge is None: self.domain_center = np.zeros(3) @@ -351,12 +382,6 @@ def generate_file_md5(m, filename, blocksize=2 ** 20): self._checksum = m return self._checksum - domain_left_edge = MutableAttribute(True) - domain_right_edge = MutableAttribute(True) - domain_width = MutableAttribute(True) - domain_dimensions = MutableAttribute(False) - domain_center = MutableAttribute(True) - @property def _mrep(self): return MinimalDataset(self) @@ -368,10 +393,6 @@ def _skip_cache(self): def hub_upload(self): self._mrep.upload() - @classmethod - def _is_valid(cls, *args, **kwargs): - return False - @classmethod def _guess_candidates(cls, base, directories, files): """ diff --git a/yt/frontends/_skeleton/data_structures.py b/yt/frontends/_skeleton/data_structures.py index faaf4aa0cae..d0e6bf06000 100644 --- a/yt/frontends/_skeleton/data_structures.py +++ b/yt/frontends/_skeleton/data_structures.py @@ -48,7 +48,7 @@ def _detect_output_fields(self): pass def _count_grids(self): - # This needs to set self.num_grids + # This needs to set self.num_grids (int) pass def _parse_index(self): @@ -63,14 +63,16 @@ def _parse_index(self): pass def _populate_grid_objects(self): - # For each grid g, this must call: - # g._prepare_grid() - # g._setup_dx() + # the minimal form of this method is + # + # for g in self.grids: + # g._prepare_grid() + # g._setup_dx() + # # This must also set: # g.Children <= list of child grids # g.Parent <= parent grid - # This is handled by the frontend because often the children must be - # identified. 
+ # This is handled by the frontend because often the children must be identified. pass @@ -115,13 +117,15 @@ def _parse_parameter_file(self): # will be converted to YTArray automatically at a later time. # This includes the cosmological parameters. # - # self.parameters <= full of code-specific items of use - # self.domain_left_edge <= array of float64 - # self.domain_right_edge <= array of float64 + # self.unique_identifier <= unique identifier for the dataset + # being read (e.g., UUID or ST_CTIME) + # self.parameters <= dict full of code-specific items of use + # self.domain_left_edge <= three-element array of float64 + # self.domain_right_edge <= three-element array of float64 # self.dimensionality <= int - # self.domain_dimensions <= array of int64 + # self.domain_dimensions <= three-element array of int64 # self.periodicity <= three-element tuple of booleans - # self.current_time <= simulation time in code units + # self.current_time <= simulation time in code units (float) # # We also set up cosmological information. Set these to zero if # non-cosmological. @@ -132,9 +136,11 @@ def _parse_parameter_file(self): # self.omega_matter <= float # self.hubble_constant <= float - # optional (has default implementation) + # optional (the followin have default implementations) # self.unique_identifier <= unique identifier for the dataset # being read (e.g., UUID or ST_CTIME) (int) + # + # self.geometry (defaults to 'cartesian') <= a lower case string ("cartesian", "polar", "cylindrical"...) pass @classmethod diff --git a/yt/frontends/_skeleton/io.py b/yt/frontends/_skeleton/io.py index 3cc840e81d9..3fc8917a083 100644 --- a/yt/frontends/_skeleton/io.py +++ b/yt/frontends/_skeleton/io.py @@ -33,6 +33,9 @@ def _read_fluid_selection(self, chunks, selector, fields, size): # Fortran-like input array with the dimension (z,y,x), a matrix # transpose is required (e.g., using np_array.transpose() or # np_array.swapaxes(0,2)). + + # Note this method is not abstract, and has a default implementation in the base class. 
+ # However, the default implementation requires that the method io_iter be defined pass def _read_chunk_data(self, chunk, fields): diff --git a/yt/frontends/enzo/io.py b/yt/frontends/enzo/io.py index 7c0ccd74be1..fa25fd55104 100644 --- a/yt/frontends/enzo/io.py +++ b/yt/frontends/enzo/io.py @@ -299,9 +299,6 @@ def _read_data_set(self, grid, field): f.close() return ds.transpose()[:, :, None] - def modify(self, field): - pass - def _read_fluid_selection(self, chunks, selector, fields, size): rv = {} # Now we have to do something unpleasant @@ -358,6 +355,3 @@ def _read_data_set(self, grid, field): ds = f["/Grid%08i/%s" % (grid.id, field)][:] f.close() return ds.transpose()[:, None, None] - - def modify(self, field): - pass diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 9ae7b2f11d5..dc6efe24325 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -458,6 +458,11 @@ def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"): self.real_ds.parameter_filename, dataset_type ) + @classmethod + def _is_valid(self, *args, **kwargs): + # This class is not meant to be instanciated by yt.load() + return False + def print_key_parameters(self): pass @@ -483,10 +488,13 @@ def _parse_parameter_file(self): setattr(self, attr, getattr(self.real_ds, attr)) def set_code_units(self): + self._set_code_unit_attributes() + self.unit_registry = self.real_ds.unit_registry + + def _set_code_unit_attributes(self): for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: my_unit = "%s_unit" % unit setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) - self.unit_registry = self.real_ds.unit_registry def __repr__(self): return "%s" % self.real_ds diff --git a/yt/geometry/geometry_handler.py b/yt/geometry/geometry_handler.py index c19ba697d70..ae7b0f0534d 100644 --- a/yt/geometry/geometry_handler.py +++ b/yt/geometry/geometry_handler.py @@ -1,3 +1,4 @@ +import abc import os import pickle import weakref @@ -17,7 +18,7 @@ ) -class Index(ParallelAnalysisInterface): +class Index(ParallelAnalysisInterface, abc.ABC): """The base index class""" _unsupported_objects = () @@ -44,6 +45,10 @@ def __init__(self, ds, dataset_type): mylog.debug("Detecting fields.") self._detect_output_fields() + @abc.abstractmethod + def _detect_output_fields(self): + pass + def _initialize_state_variables(self): self._parallel_locking = False self._data_file = None diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 318f5c5ef09..46a0d83fb72 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -1,3 +1,4 @@ +import abc import weakref from collections import defaultdict @@ -16,7 +17,7 @@ from .grid_container import GridTree, MatchPointsToGrids -class GridIndex(Index): +class GridIndex(Index, abc.ABC): """The index class for patch and block AMR datasets. 
""" float_type = "float64" @@ -45,6 +46,18 @@ def _setup_geometry(self): mylog.debug("Re-examining index") self._initialize_level_stats() + @abc.abstractmethod + def _count_grids(self): + pass + + @abc.abstractmethod + def _parse_index(self): + pass + + @abc.abstractmethod + def _populate_grid_objects(self): + pass + def __del__(self): del self.grid_dimensions del self.grid_left_edge diff --git a/yt/utilities/io_handler.py b/yt/utilities/io_handler.py index 01dfe17acce..67b0c86f7e4 100644 --- a/yt/utilities/io_handler.py +++ b/yt/utilities/io_handler.py @@ -56,13 +56,6 @@ def __init__(self, ds): def preload(self, chunk, fields, max_size): yield self - def pop(self, grid, field): - if grid.id in self.queue and field in self.queue[grid.id]: - return self.modify(self.queue[grid.id].pop(field)) - else: - # We only read the one set and do not store it if it isn't pre-loaded - return self._read_data_set(grid, field) - def peek(self, grid, field): return self.queue[grid.id].get(field, None) @@ -133,6 +126,13 @@ def _read_fluid_selection(self, chunks, selector, fields, size): ind[field] += obj.select(selector, data, rv[field], ind[field]) return rv + def io_iter(self, chunks, fields): + raise NotImplementedError( + "subclassing Dataset.io_iter this is required in order to use the default " + "implementation of Dataset._read_fluid_selection. " + "Custom implementations of the latter may not rely on this method." + ) + def _read_data_slice(self, grid, field, axis, coord): sl = [slice(None), slice(None), slice(None)] sl[axis] = slice(coord, coord + 1) From 080c786f1abb9acb78f9e3edb03daa9fd98c89ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 15:29:29 +0200 Subject: [PATCH 104/653] fix: B014 errors (redundancy in exception catching) --- yt/frontends/flash/data_structures.py | 4 ++-- yt/frontends/open_pmd/data_structures.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 894e0a015ca..32b2ae9caba 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -459,7 +459,7 @@ def _is_valid(self, *args, **kwargs): fileh = HDF5FileHandler(args[0]) if "bounding box" in fileh["/"].keys(): return True - except (IOError, OSError, ImportError): + except (OSError, ImportError): pass return False @@ -530,7 +530,7 @@ def _is_valid(self, *args, **kwargs): and "localnp" in fileh["/"].keys() ): return True - except (IOError, OSError, ImportError): + except (OSError, ImportError): pass return False diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index 6550148ee89..448aae60d3a 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -619,7 +619,7 @@ def _is_valid(self, *args, **kwargs): return True return False - except (IOError, OSError, ImportError): + except (OSError, ImportError): return False @@ -685,5 +685,5 @@ def _is_valid(self, *args, **kwargs): return True return False - except (IOError, OSError, ImportError): + except (OSError, ImportError): return False From 1ae047b5e0c64f9db96ed3983061ac6f37bc80ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 3 Jul 2020 09:31:29 +0200 Subject: [PATCH 105/653] fix: remove a redundant error --- yt/fields/field_info_container.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index 
f22cf8031af..561371656b2 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -428,8 +428,6 @@ def check_derived_fields(self, fields_to_check=None): unavailable = [] fields_to_check = fields_to_check or list(self.keys()) for field in fields_to_check: - if field not in self: - raise YTFieldNotFound(str(field)) fi = self[field] try: fd = fi.get_dependencies(ds=self.ds) From da5f4d990bec68d2d6602eb73fe19f5ddf611274 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Tue, 21 Jul 2020 13:41:46 -0500 Subject: [PATCH 106/653] Bump IPython version for py38 support. Fixes #2769 --- doc/source/intro/index.rst | 6 +++--- tests/test_requirements.txt | 2 +- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/source/intro/index.rst b/doc/source/intro/index.rst index f69d243d56b..22f3c4a6175 100644 --- a/doc/source/intro/index.rst +++ b/doc/source/intro/index.rst @@ -126,12 +126,12 @@ Executing and Scripting yt yt is written almost entirely in python and it functions as a library that you can import into your python scripts. There is full docstring documentation for all of the major classes and functions in the :ref:`API docs -`. yt has support for :ref:`running in iPython and for running -iPython notebooks ` for fully interactive sessions both +`. yt has support for :ref:`running in IPython and for running +IPython notebooks ` for fully interactive sessions both locally and on remote supercomputers. yt also has a number of ways it can be :ref:`executed at the command line ` for simple tasks like automatically loading a dataset, updating the yt sourcecode, starting an -iPython notebook, or uploading scripts and images to public locations. There +IPython notebook, or uploading scripts and images to public locations. There is an optional :ref:`yt configuration file ` you can modify for controlling local settings like color, logging, output settings. There is also an optional :ref:`yt plugin file ` you can create diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index 53ab69d782d..1002c74d8e8 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -4,7 +4,7 @@ coverage==4.5.4 fastcache==1.0.2 glueviz==0.13.3 h5py==2.10.0 -ipython==7.1.1 +ipython==7.6.1 matplotlib==3.3.0 mock nose-timer==1.0.0 From 8666a0de03ea1f4d94adceb377357b758e8fc671 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Fri, 10 Jul 2020 12:02:07 -0500 Subject: [PATCH 107/653] Remove all occurences of VR.render() called right before VR.save() --- doc/source/cookbook/amrkdtree_downsampling.py | 7 ++----- doc/source/cookbook/camera_movement.py | 3 --- doc/source/cookbook/opaque_rendering.py | 7 ------- doc/source/cookbook/sigma_clip.py | 1 - doc/source/cookbook/various_lens.py | 6 ------ yt/utilities/answer_testing/answer_tests.py | 1 - yt/utilities/answer_testing/framework.py | 1 - yt/visualization/volume_rendering/tests/test_lenses.py | 6 ------ 8 files changed, 2 insertions(+), 30 deletions(-) diff --git a/doc/source/cookbook/amrkdtree_downsampling.py b/doc/source/cookbook/amrkdtree_downsampling.py index db8a06b64e9..8abe9decdcc 100644 --- a/doc/source/cookbook/amrkdtree_downsampling.py +++ b/doc/source/cookbook/amrkdtree_downsampling.py @@ -39,7 +39,6 @@ render_source.set_volume(kd_low_res) render_source.set_field("density") -sc.render() sc.save("v1.png", sigma_clip=6.0) # This operation was substantially faster. 
Now lets modify the low resolution @@ -54,13 +53,11 @@ alpha=np.ones(4, dtype="float64"), colormap="RdBu_r", ) -sc.render() sc.save("v2.png", sigma_clip=6.0) # This looks better. Now let's try turning on opacity. tf.grey_opacity = True -sc.render() sc.save("v3.png", sigma_clip=6.0) # ## That seemed to pick out som interesting structures. Now let's bump up the @@ -74,13 +71,13 @@ alpha=10.0 * np.ones(4, dtype="float64"), colormap="RdBu_r", ) -sc.render() +tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], + alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r') sc.save("v4.png", sigma_clip=6.0) # ## This looks pretty good, now lets go back to the full resolution AMRKDTree # render_source.set_volume(kd) -sc.render() sc.save("v5.png", sigma_clip=6.0) # This looks great! diff --git a/doc/source/cookbook/camera_movement.py b/doc/source/cookbook/camera_movement.py index c8612da9da6..a114d8cd858 100644 --- a/doc/source/cookbook/camera_movement.py +++ b/doc/source/cookbook/camera_movement.py @@ -12,19 +12,16 @@ # Zoom out by a factor of 2 over 5 frames for _ in cam.iter_zoom(0.5, 5): - sc.render() sc.save("camera_movement_%04i.png" % frame) frame += 1 # Move to the position [-10.0, 10.0, -10.0] over 5 frames pos = ds.arr([-10.0, 10.0, -10.0], "code_length") for _ in cam.iter_move(pos, 5): - sc.render() sc.save("camera_movement_%04i.png" % frame) frame += 1 # Rotate by 180 degrees over 5 frames for _ in cam.iter_rotate(np.pi, 5): - sc.render() sc.save("camera_movement_%04i.png" % frame) frame += 1 diff --git a/doc/source/cookbook/opaque_rendering.py b/doc/source/cookbook/opaque_rendering.py index 2b62cf43bea..a46e7d5e40b 100644 --- a/doc/source/cookbook/opaque_rendering.py +++ b/doc/source/cookbook/opaque_rendering.py @@ -13,7 +13,6 @@ tf.add_layers( 4, 0.01, col_bounds=[-27.5, -25.5], alpha=np.logspace(-3, 0, 4), colormap="RdBu_r" ) -sc.render() sc.save("v1.png", sigma_clip=6.0) # In this case, the default alphas used (np.logspace(-3,0,Nbins)) does not @@ -25,14 +24,12 @@ tf.add_layers( 4, 0.01, col_bounds=[-27.5, -25.5], alpha=np.logspace(0, 0, 4), colormap="RdBu_r" ) -sc.render() sc.save("v2.png", sigma_clip=6.0) # Now let's set the grey_opacity to True. This should make the inner portions # start to be obscured tf.grey_opacity = True -sc.render() sc.save("v3.png", sigma_clip=6.0) # That looks pretty good, but let's start bumping up the opacity. @@ -45,7 +42,6 @@ alpha=10.0 * np.ones(4, dtype="float64"), colormap="RdBu_r", ) -sc.render() sc.save("v4.png", sigma_clip=6.0) # Let's bump up again to see if we can obscure the inner contour. @@ -58,7 +54,6 @@ alpha=30.0 * np.ones(4, dtype="float64"), colormap="RdBu_r", ) -sc.render() sc.save("v5.png", sigma_clip=6.0) # Now we are losing sight of everything. Let's see if we can obscure the next @@ -72,14 +67,12 @@ alpha=100.0 * np.ones(4, dtype="float64"), colormap="RdBu_r", ) -sc.render() sc.save("v6.png", sigma_clip=6.0) # That is very opaque! Now lets go back and see what it would look like with # grey_opacity = False tf.grey_opacity = False -sc.render() sc.save("v7.png", sigma_clip=6.0) # That looks pretty different, but the main thing is that you can see that the diff --git a/doc/source/cookbook/sigma_clip.py b/doc/source/cookbook/sigma_clip.py index 22845dbed4f..cf46b96ba78 100644 --- a/doc/source/cookbook/sigma_clip.py +++ b/doc/source/cookbook/sigma_clip.py @@ -10,7 +10,6 @@ # Sigma clipping removes the highest intensity pixels in a volume render, # which affects the overall contrast of the image. 
sc = yt.create_scene(ds, field=("gas", "density")) -sc.render() sc.save("clip_0.png") sc.save("clip_2.png", sigma_clip=2) sc.save("clip_4.png", sigma_clip=4) diff --git a/doc/source/cookbook/various_lens.py b/doc/source/cookbook/various_lens.py index 708ae2495e1..54ade80ff12 100644 --- a/doc/source/cookbook/various_lens.py +++ b/doc/source/cookbook/various_lens.py @@ -32,7 +32,6 @@ # height of final projection, while width[2] in plane-parallel lens is not used. cam.set_width(ds.domain_width * 0.5) sc.add_source(vol) -sc.render() sc.save("lens_plane-parallel.png", sigma_clip=6.0) # Perspective lens @@ -47,7 +46,6 @@ # camera and the final image. cam.set_width(ds.domain_width * 0.5) sc.add_source(vol) -sc.render() sc.save("lens_perspective.png", sigma_clip=6.0) # Stereo-perspective lens @@ -61,7 +59,6 @@ # Set the distance between left-eye and right-eye. cam.lens.disparity = ds.domain_width[0] * 1.0e-3 sc.add_source(vol) -sc.render() sc.save("lens_stereo-perspective.png", sigma_clip=6.0) # Fisheye lens @@ -74,7 +71,6 @@ cam.set_width(ds.domain_width) cam.lens.fov = 360.0 sc.add_source(vol) -sc.render() sc.save("lens_fisheye.png", sigma_clip=6.0) # Spherical lens @@ -90,7 +86,6 @@ # In (stereo)spherical camera, camera width is not used since the entire volume # will be rendered sc.add_source(vol) -sc.render() sc.save("lens_spherical.png", sigma_clip=6.0) # Stereo-spherical lens @@ -107,5 +102,4 @@ # Set the distance between left-eye and right-eye. cam.lens.disparity = ds.domain_width[0] * 1.0e-3 sc.add_source(vol) -sc.render() sc.save("lens_stereo-spherical.png", sigma_clip=6.0) diff --git a/yt/utilities/answer_testing/answer_tests.py b/yt/utilities/answer_testing/answer_tests.py index 7665df50d91..ff1a0a7a89d 100644 --- a/yt/utilities/answer_testing/answer_tests.py +++ b/yt/utilities/answer_testing/answer_tests.py @@ -304,7 +304,6 @@ def extract_connected_sets(ds_fn, data_source, field, num_levels, min_val, max_v def VR_image_comparison(scene): tmpfd, tmpname = tempfile.mkstemp(suffix=".png") os.close(tmpfd) - scene.render() scene.save(tmpname, sigma_clip=1.0) image = mpimg.imread(tmpname) os.remove(tmpname) diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 1ec0e7fc61c..d5725392a56 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -820,7 +820,6 @@ def __init__(self, scene, ds, desc, decimals): def run(self): tmpfd, tmpname = tempfile.mkstemp(suffix=".png") os.close(tmpfd) - self.scene.render() self.scene.save(tmpname, sigma_clip=1.0) image = mpimg.imread(tmpname) os.remove(tmpname) diff --git a/yt/visualization/volume_rendering/tests/test_lenses.py b/yt/visualization/volume_rendering/tests/test_lenses.py index 5d1fd1db7a7..531eceda710 100644 --- a/yt/visualization/volume_rendering/tests/test_lenses.py +++ b/yt/visualization/volume_rendering/tests/test_lenses.py @@ -46,7 +46,6 @@ def test_perspective_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.render() sc.save("test_perspective_%s.png" % self.field[1], sigma_clip=6.0) def test_stereoperspective_lens(self): @@ -58,7 +57,6 @@ def test_stereoperspective_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.render() sc.save("test_stereoperspective_%s.png" % self.field[1], sigma_clip=6.0) def test_fisheye_lens(self): @@ -73,7 +71,6 @@ def test_fisheye_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.render() 
sc.save("test_fisheye_%s.png" % self.field[1], sigma_clip=6.0) def test_plane_lens(self): @@ -86,7 +83,6 @@ def test_plane_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.render() sc.save("test_plane_%s.png" % self.field[1], sigma_clip=6.0) def test_spherical_lens(self): @@ -98,7 +94,6 @@ def test_spherical_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.render() sc.save("test_spherical_%s.png" % self.field[1], sigma_clip=6.0) def test_stereospherical_lens(self): @@ -112,5 +107,4 @@ def test_stereospherical_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.render() sc.save("test_stereospherical_%s.png" % self.field[1], sigma_clip=6.0) From 895c0b6905bca91d58602b4db71de72594cc28ad Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 21 Jul 2020 15:43:22 +0200 Subject: [PATCH 108/653] fixup flake8 E731 (assigning to lambdas) --- setup.cfg | 1 - yt/data_objects/analyzer_objects.py | 6 +++++- yt/data_objects/time_series.py | 6 +++++- yt/frontends/art/io.py | 21 ++++++++++++------- yt/utilities/decompose.py | 4 +++- .../volume_rendering/old_camera.py | 6 +++++- 6 files changed, 31 insertions(+), 13 deletions(-) diff --git a/setup.cfg b/setup.cfg index a6db023776d..e37e24564aa 100644 --- a/setup.cfg +++ b/setup.cfg @@ -35,7 +35,6 @@ ignore = E203, # Whitespace before ':' (black compatibility) E306, # Expected 1 blank line before a nested definition E501, # Line too long (black compatibility) E722, # Do not use bare except, specify exception instead TODO: handle - E731, # Do not assign a lambda expression, use a def TODO: add noqas in places this triggers instead of ignoring it everywhere E741, # Do not use variables named 'I', 'O', or 'l' W503, # Line break occurred before a binary operator (black compatibility) W605, # Invalid escape sequence 'x' diff --git a/yt/data_objects/analyzer_objects.py b/yt/data_objects/analyzer_objects.py index a63ed8b54d5..78663dfb820 100644 --- a/yt/data_objects/analyzer_objects.py +++ b/yt/data_objects/analyzer_objects.py @@ -89,7 +89,11 @@ class ParameterValue(AnalysisTask): def __init__(self, parameter, cast=None): self.parameter = parameter if cast is None: - cast = lambda a: a + + def _identity(x): + return x + + cast = _identity self.cast = cast def eval(self, ds): diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index eeda680bb4a..1cd50b8b63d 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -181,7 +181,11 @@ def __init__( self.tasks = AnalysisTaskProxy(self) self.params = TimeSeriesParametersContainer(self) if setup_function is None: - setup_function = lambda a: None + + def _null(x): + return None + + setup_function = _null self._setup_function = setup_function for type_name in data_object_registry: setattr( diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index 233a58b22e2..09030bad77f 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -2,6 +2,7 @@ import os.path import sys from collections import defaultdict +from functools import partial import numpy as np @@ -116,14 +117,14 @@ def _get_field(self, field): pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax) npa = idxb - idxa sizes = np.diff(np.concatenate(([0], self.ls))) - rp = lambda ax: read_particles( - self.file_particle, self.Nrow, idxa=idxa, idxb=idxb, fields=ax + rp = partial( + read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb ) for 
i, ax in enumerate("xyz"): if fname.startswith("particle_position_%s" % ax): dd = self.ds.domain_dimensions[0] off = 1.0 / dd - tr[field] = rp([ax])[0] / dd - off + tr[field] = rp(fields=[ax])[0] / dd - off if fname.startswith("particle_velocity_%s" % ax): (tr[field],) = rp(["v" + ax]) if fname.startswith("particle_mass"): @@ -228,15 +229,15 @@ def _get_field(self, field): pbool, idxa, idxb = _determine_field_size(self.ds, ftype, self.ls, ptmax) npa = idxb - idxa sizes = np.diff(np.concatenate(([0], self.ls))) - rp = lambda ax: read_particles( - self.file_particle, self.Nrow, idxa=idxa, idxb=idxb, fields=ax + rp = partial( + read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb ) for i, ax in enumerate("xyz"): if fname.startswith("particle_position_%s" % ax): # This is not the same as domain_dimensions dd = self.ds.parameters["ng"] off = 1.0 / dd - tr[field] = rp([ax])[0] / dd - off + tr[field] = rp(fields=[ax])[0] / dd - off if fname.startswith("particle_velocity_%s" % ax): (tr[field],) = rp(["v" + ax]) if fname.startswith("particle_mass"): @@ -622,14 +623,18 @@ def b2a(bt, **kwargs): # converts code time into expansion factor # if Om0 ==1and OmL == 0 then b2a is (1 / (1-td))**2 # if bt < -190.0 or bt > -.10: raise 'bt outside of range' - f_b2a = lambda at: a2b(at, **kwargs) - bt + def f_b2a(at): + return a2b(at, **kwargs) - bt + return find_root(f_b2a, 1e-4, 1.1) # return so.brenth(f_b2a,1e-4,1.1) # return brent.brent(f_b2a) def a2t(at, Om0=0.27, Oml0=0.73, h=0.700): - integrand = lambda x: 1.0 / (x * sqrt(Oml0 + Om0 * x ** -3.0)) + def integrand(x): + return 1.0 / (x * sqrt(Oml0 + Om0 * x ** -3.0)) + # current_time,err = si.quad(integrand,0.0,at,epsabs=1e-6,epsrel=1e-6) current_time = quad(integrand, 1e-4, at) # spacings = np.logspace(-5,np.log10(at),num=int(1e5)) diff --git a/yt/utilities/decompose.py b/yt/utilities/decompose.py index ec34564bd7d..79fbf9b1ca7 100644 --- a/yt/utilities/decompose.py +++ b/yt/utilities/decompose.py @@ -1,6 +1,8 @@ import numpy as np -SIEVE_PRIMES = lambda l: l and l[:1] + SIEVE_PRIMES([n for n in l if n % l[0]]) + +def SIEVE_PRIMES(x): + return x and x[:1] + SIEVE_PRIMES([n for n in x if n % x[0]]) def decompose_to_primes(max_prime): diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 55f31cd172c..14b6d611ead 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -2040,7 +2040,11 @@ def plot_allsky_healpix( if take_log: func = np.log10 else: - func = lambda a: a + + def _identity(x): + return x + + func = _identity implot = ax.imshow( func(img), extent=(-np.pi, np.pi, -np.pi / 2, np.pi / 2), From adea07698c83a576ba8190fb455ca18636d53cb5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 21 Jul 2020 15:45:12 +0200 Subject: [PATCH 109/653] fixup flake8 E722 (bare excepts) --- setup.cfg | 1 - 1 file changed, 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index e37e24564aa..9019363cd9f 100644 --- a/setup.cfg +++ b/setup.cfg @@ -34,7 +34,6 @@ ignore = E203, # Whitespace before ':' (black compatibility) E302, # Expected 2 blank lines, found 0 E306, # Expected 1 blank line before a nested definition E501, # Line too long (black compatibility) - E722, # Do not use bare except, specify exception instead TODO: handle E741, # Do not use variables named 'I', 'O', or 'l' W503, # Line break occurred before a binary operator (black compatibility) W605, # Invalid escape sequence 'x' From 
1643a58ff05198b1b9d974bfba252ce9dd8e5eb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 20:03:06 +0200 Subject: [PATCH 110/653] docs: remove metions of legacy python in docs and scripts --- benchmarks/asv.conf.json | 2 +- doc/activate | 2 - doc/install_script.sh | 37 ++----------------- doc/source/analyzing/parallel_computation.rst | 2 +- doc/source/developing/debugdrive.rst | 2 +- doc/source/developing/external_analysis.rst | 2 +- doc/source/developing/testing.rst | 16 -------- doc/source/installing.rst | 4 +- setup.py | 2 +- 9 files changed, 11 insertions(+), 58 deletions(-) diff --git a/benchmarks/asv.conf.json b/benchmarks/asv.conf.json index 6b768543d2a..b1053412433 100644 --- a/benchmarks/asv.conf.json +++ b/benchmarks/asv.conf.json @@ -25,7 +25,7 @@ // The Pythons you'd like to test against. If not provided, defaults // to the current version of Python used to run `asv`. - // "pythons": ["2.7", "3.3"], + // "pythons": ["3.7", "3.8"], // The matrix of dependencies to test. Each key is the name of a // package (in PyPI) and the values are version numbers. An empty diff --git a/doc/activate b/doc/activate index cc83b616aa2..37bf8dd17bb 100644 --- a/doc/activate +++ b/doc/activate @@ -70,8 +70,6 @@ YT_DEST="$VIRTUAL_ENV" export YT_DEST _OLD_VIRTUAL_PYTHONPATH="$PYTHONPATH" -PYTHONPATH="$VIRTUAL_ENV/lib/python2.7/site-packages" -export PYTHONPATH _OLD_VIRTUAL_LD_LIBRARY_PATH="$LD_LIBRARY_PATH" LD_LIBRARY_PATH="$VIRTUAL_ENV/lib:$LD_LIBRARY_PATH" diff --git a/doc/install_script.sh b/doc/install_script.sh index 0185996394e..ca1a7cd4857 100644 --- a/doc/install_script.sh +++ b/doc/install_script.sh @@ -24,9 +24,6 @@ YT_DIR="" # These options can be set to customize the installation. -INST_PY3=1 # Install Python 3 instead of Python 2. If this is turned on, - # all Python packages (including yt) will be installed - # in Python 3. INST_GIT=1 # Install git or not? If git is not already installed, yt # cannot be installed from source. INST_EMBREE=0 # Install dependencies needed for Embree-accelerated ray tracing @@ -39,7 +36,6 @@ INST_CARTOPY=0 # Install cartopy? INST_NOSE=1 # Install nose? INST_NETCDF4=1 # Install netcdf4 and its python bindings? INST_POOCH=1 # Install pooch? -INST_HG=0 # Install Mercurial or not? 
# This is the branch we will install from for INST_YT_SOURCE=1 BRANCH="master" @@ -383,12 +379,8 @@ function do_exit exit 1 } -if [ $INST_PY3 -eq 1 ] -then - PYTHON_EXEC='python3' -else - PYTHON_EXEC='python2.7' -fi +PYTHON_EXEC='python3' + if type -P curl &>/dev/null then @@ -440,12 +432,7 @@ else exit 1 fi -if [ $INST_PY3 -eq 1 ] -then - PY_VERSION='3' -else - PY_VERSION='2' -fi +PY_VERSION='3' MINICONDA_PKG="Miniconda${PY_VERSION}-${MINICONDA_VERSION}-${MINICONDA_OS}-${MINICONDA_ARCH}.sh" @@ -516,10 +503,6 @@ then YT_DEPS+=('pooch') fi YT_DEPS+=('conda-build') -if [ $INST_PY3 -eq 0 ] && [ $INST_HG -eq 1 ] -then - YT_DEPS+=('mercurial') -fi YT_DEPS+=('sympy') if [ $INST_NETCDF4 -eq 1 ] @@ -551,13 +534,6 @@ for YT_DEP in "${YT_DEPS[@]}"; do log_cmd ${DEST_DIR}/bin/conda install -c conda-forge --yes ${YT_DEP} done -if [ $INST_PY3 -eq 1 ] && [ $INST_HG -eq 1 ] -then - echo "Installing mercurial" - log_cmd ${DEST_DIR}/bin/conda create -y -n py27 python=2.7 mercurial - log_cmd ln -s ${DEST_DIR}/envs/py27/bin/hg ${DEST_DIR}/bin -fi - if [ $INST_YT_SOURCE -eq 1 ] then log_cmd ${GIT_EXE} clone https://github.com/yt-project/yt_conda ${DEST_DIR}/src/yt_conda @@ -594,12 +570,7 @@ fi # conda doesn't package pyx, so we install manually with pip if [ $INST_PYX -eq 1 ] then - if [ $INST_PY3 -eq 1 ] - then - log_cmd ${DEST_DIR}/bin/pip install pyx - else - log_cmd ${DEST_DIR}/bin/pip install pyx==0.12.1 - fi + log_cmd ${DEST_DIR}/bin/pip install pyx fi if [ $INST_YT_SOURCE -eq 0 ] diff --git a/doc/source/analyzing/parallel_computation.rst b/doc/source/analyzing/parallel_computation.rst index 846c0e33f7f..73e291e83a9 100644 --- a/doc/source/analyzing/parallel_computation.rst +++ b/doc/source/analyzing/parallel_computation.rst @@ -110,7 +110,7 @@ processes using the following Bash command: .. code-block:: bash - $ mpirun -np 16 python2.7 my_script.py + $ mpirun -np 16 python my_script.py .. note:: diff --git a/doc/source/developing/debugdrive.rst b/doc/source/developing/debugdrive.rst index b398f89b5d1..a66daa9ebec 100644 --- a/doc/source/developing/debugdrive.rst +++ b/doc/source/developing/debugdrive.rst @@ -99,7 +99,7 @@ provided through the yt command. So if you run the command: .. code-block:: bash - $ mpirun -np 4 python2.7 some_script.py --parallel --rpdb + $ mpirun -np 4 python some_script.py --parallel --rpdb and it reaches an error or an exception, it will launch the debugger. Additionally, instructions will be printed for connecting to the debugger. diff --git a/doc/source/developing/external_analysis.rst b/doc/source/developing/external_analysis.rst index e4b9bcbe65b..bd58495f40c 100644 --- a/doc/source/developing/external_analysis.rst +++ b/doc/source/developing/external_analysis.rst @@ -188,7 +188,7 @@ To build our extension, we would run: .. code-block:: bash - $ python2.7 axes_calculator_setup.py build_ext -i + $ python axes_calculator_setup.py build_ext -i Note that since we don't yet have an ``axes_calculator.pyx``, this will fail. But once we have it, it ought to run. 
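For reference, a build script like the ``axes_calculator_setup.py`` invoked above is typically just a few lines of setuptools/Cython boilerplate. The following is a minimal, generic sketch (an illustration only -- not the actual file shown elsewhere in the yt documentation, whose details may differ):

.. code-block:: python

   # Illustrative sketch: a minimal Cython build script of the kind driven by
   # ``python axes_calculator_setup.py build_ext -i``.  Module and file names
   # here are hypothetical.
   import numpy as np
   from setuptools import Extension, setup
   from Cython.Build import cythonize

   extensions = [
       Extension(
           "axes_calculator",                # hypothetical module name
           sources=["axes_calculator.pyx"],  # the .pyx source discussed above
           include_dirs=[np.get_include()],  # only needed if NumPy arrays are used
       )
   ]

   setup(name="axes_calculator", ext_modules=cythonize(extensions))

Once the extension builds, ``import axes_calculator`` should work from the directory containing the generated shared library.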
diff --git a/doc/source/developing/testing.rst b/doc/source/developing/testing.rst index a1a8180bd15..be3024fca4b 100644 --- a/doc/source/developing/testing.rst +++ b/doc/source/developing/testing.rst @@ -496,19 +496,3 @@ In order to add a new set of answer tests, it is sufficient to extend the + other_tests: unittests: - -Restricting Python Versions for Answer Tests -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -If for some reason a test can be run only for a specific version of python it is -possible to indicate this by adding a ``[py2]`` or ``[py3]`` tag. For example: - -.. code-block:: yaml - - answer_tests: - local_test_000: - - yt/test_A.py # [py2] - - yt/test_B.py # [py3] - -would result in ``test_A.py`` being run only for *python2* and ``test_B.py`` -being run only for *python3*. diff --git a/doc/source/installing.rst b/doc/source/installing.rst index 983ba62e026..aa4780d2a9c 100644 --- a/doc/source/installing.rst +++ b/doc/source/installing.rst @@ -413,8 +413,8 @@ development version of yt instead of the latest stable release, you will need if you are curious why ``--install-option="--prefix="`` is necessary on some systems. This will install yt into a folder in your home directory -(``$HOME/.local/lib64/python2.7/site-packages`` on Linux, -``$HOME/Library/Python/2.7/lib/python/site-packages/`` on OSX) Please refer to +(e.g. ``$HOME/.local/lib64/python/site-packages`` on Linux, +``$HOME/Library/Python//lib/python/site-packages/`` on OSX) Please refer to the ``setuptools`` documentation for the additional options. If you are unable to locate the ``yt`` executable (i.e. executing ``yt version`` diff --git a/setup.py b/setup.py index 106a0512742..7cb8e051ea1 100644 --- a/setup.py +++ b/setup.py @@ -140,5 +140,5 @@ zip_safe=False, scripts=["scripts/iyt"], ext_modules=[], # !!! 
We override this inside build_ext above - python_requires=">=2.7,!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*", + python_requires=">=3.6", ) From 25960e0b78697f9654f3d52948f09c45e8b79869 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 22:14:48 +0200 Subject: [PATCH 111/653] cleanup: remove version checks for python 3 --- doc/source/cookbook/tests/test_cookbook.py | 12 +++++++----- setup.py | 5 ----- setupext.py | 6 +----- tests/nose_runner.py | 6 +----- yt/frontends/art/io.py | 4 ---- yt/utilities/lodgeit.py | 10 ++-------- 6 files changed, 11 insertions(+), 32 deletions(-) diff --git a/doc/source/cookbook/tests/test_cookbook.py b/doc/source/cookbook/tests/test_cookbook.py index 5571fd0bd10..2a951d84b86 100644 --- a/doc/source/cookbook/tests/test_cookbook.py +++ b/doc/source/cookbook/tests/test_cookbook.py @@ -11,8 +11,8 @@ """ import glob import os -import sys import subprocess +import sys def run_with_capture(*args, **kwargs): @@ -35,10 +35,12 @@ def run_with_capture(*args, **kwargs): PARALLEL_TEST = {"rockstar_nest.py": "3"} -BLACKLIST = ["opengl_ipython.py", "opengl_vr.py", "matplotlib-animation.py"] - -if sys.version_info >= (3, 0, 0): - BLACKLIST.append("rockstar_nest.py") +BLACKLIST = [ + "opengl_ipython.py", + "opengl_vr.py", + "matplotlib-animation.py", + "rockstar_nest.py", +] def test_recipe(): diff --git a/setup.py b/setup.py index 106a0512742..c0bfd57f434 100644 --- a/setup.py +++ b/setup.py @@ -13,11 +13,6 @@ install_ccompiler, ) -if sys.version_info < (3, 5): - print("yt currently supports versions newer than Python 3.5") - print("certain features may fail unexpectedly and silently with older " "versions.") - sys.exit(1) - install_ccompiler() try: diff --git a/setupext.py b/setupext.py index 26707fec370..e60862b4515 100644 --- a/setupext.py +++ b/setupext.py @@ -61,14 +61,10 @@ def stdchannel_redirected(stdchannel, dest_filename): def check_for_openmp(): """Returns True if local setup supports OpenMP, False otherwise - Code adapted from astropy_helpers, originally written by Tom + Code adapted from astropy_helpers, originally written by Tom Robitaille and Curtis McCully. 
""" - # See https://bugs.python.org/issue25150 - if sys.version_info[:3] == (3, 5, 0): - return False - # Create a temporary directory ccompiler = new_compiler() customize_compiler(ccompiler) diff --git a/tests/nose_runner.py b/tests/nose_runner.py index 72f38e69553..5c69934d330 100644 --- a/tests/nose_runner.py +++ b/tests/nose_runner.py @@ -78,16 +78,12 @@ def __str__(self): def generate_tasks_input(): pyver = "py{}{}".format(sys.version_info.major, sys.version_info.minor) - if sys.version_info < (3, 0, 0): - DROP_TAG = "py3" - else: - DROP_TAG = "py2" test_dir = ytcfg.get("yt", "test_data_dir") answers_dir = os.path.join(test_dir, "answers") with open("tests/tests.yaml", "r") as obj: lines = obj.read() - data = "\n".join([line for line in lines.split("\n") if DROP_TAG not in line]) + data = "\n".join([line for line in lines.split("\n") if "py2" not in line]) tests = yaml.load(data, Loader=yaml.FullLoader) base_argv = ["-s", "--nologcapture", "--with-xunit"] diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index 233a58b22e2..7ff8e7ed332 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -1,6 +1,5 @@ import os import os.path -import sys from collections import defaultdict import numpy as np @@ -17,9 +16,6 @@ from yt.utilities.lib.geometry_utils import compute_morton from yt.utilities.logger import ytLogger as mylog -if sys.version_info >= (3, 0, 0): - long = int - class IOHandlerART(BaseIOHandler): _dataset_type = "art" diff --git a/yt/utilities/lodgeit.py b/yt/utilities/lodgeit.py index 7649781eef6..126f5ee6e6d 100644 --- a/yt/utilities/lodgeit.py +++ b/yt/utilities/lodgeit.py @@ -26,7 +26,6 @@ 2006 Matt Good , 2005 Raphael Slinckx """ - import os import sys from optparse import OptionParser @@ -204,10 +203,7 @@ def download_paste(uid): paste = xmlrpc.pastes.getPaste(uid) if not paste: fail('Paste "%s" does not exist.' 
% uid, 5) - if sys.version_info >= (3, 0, 0): - code = paste["code"] - else: - code = paste["code"].encode("utf-8") + code = paste["code"] print(code) @@ -323,9 +319,7 @@ def main( fail("Aborted, no content to paste.", 4) # create paste - code = make_utf8(data, encoding) - if sys.version_info >= (3, 0, 0): - code = code.decode("utf-8") + code = make_utf8(data, encoding).decode("utf-8") pid = create_paste(code, language, filename, mimetype, private) url = "%sshow/%s/" % (SERVICE_URL, pid) print(url) From 8215a5f33e15db11b77f53ce07a917786fd91639 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 14:06:33 +0200 Subject: [PATCH 112/653] fix: fix flake8-bugbear B006 (Do not use mutable data structures for argument defaults) errors --- setup.cfg | 2 +- yt/data_objects/data_containers.py | 4 ++- yt/data_objects/particle_container.py | 4 ++- yt/data_objects/selection_data_containers.py | 4 ++- yt/frontends/fits/data_structures.py | 8 ++++-- yt/frontends/open_pmd/data_structures.py | 6 ++++- yt/utilities/answer_testing/answer_tests.py | 8 ++++-- yt/utilities/lib/cykdtree/plot.py | 28 +++++++++++++++----- yt/visualization/eps_writer.py | 10 ++++--- 9 files changed, 55 insertions(+), 19 deletions(-) diff --git a/setup.cfg b/setup.cfg index a6db023776d..f0a4c73e5d0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -51,6 +51,6 @@ combine_as_imports=True line_length=88 # isort can't be applied to yt/__init__.py because it creates circular imports skip = venv, doc, benchmarks, yt/__init__.py, yt/extern -known_third_party = IPython, nose, numpy, sympy, matplotlib, unyt, git, yaml, dateutil, requests, coverage, pytest +known_third_party = IPython, nose, numpy, sympy, matplotlib, unyt, git, yaml, dateutil, requests, coverage, pytest, pyx known_first_party = yt sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 5a26fce13c5..d19d8b77b05 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -2120,7 +2120,7 @@ def __init__(self, center, ds, field_parameters=None, data_source=None): self.coords = None self._grids = None - def cut_region(self, field_cuts, field_parameters=None, locals={}): + def cut_region(self, field_cuts, field_parameters=None, locals=None): """ Return a YTCutRegion, where the a cell is identified as being inside the cut region based on the value of one or more fields. 
Note that in @@ -2151,6 +2151,8 @@ def cut_region(self, field_cuts, field_parameters=None, locals={}): >>> cr = ad.cut_region(["obj['temperature'] > 1e6"]) >>> print(cr.quantities.total_quantity("cell_mass").in_units('Msun')) """ + if locals is None: + locals = {} cr = self.ds.cut_region( self, field_cuts, field_parameters=field_parameters, locals=locals ) diff --git a/yt/data_objects/particle_container.py b/yt/data_objects/particle_container.py index 59709be71cd..f59ef908cd8 100644 --- a/yt/data_objects/particle_container.py +++ b/yt/data_objects/particle_container.py @@ -21,7 +21,9 @@ class ParticleContainer(YTSelectionContainer): _skip_add = True _con_args = ("base_region", "data_files", "overlap_files") - def __init__(self, base_region, data_files, overlap_files=[], domain_id=-1): + def __init__(self, base_region, data_files, overlap_files=None, domain_id=-1): + if overlap_files is None: + overlap_files = [] self.field_data = YTFieldData() self.field_parameters = {} self.data_files = ensure_list(data_files) diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index 2ab2ccb4f40..e21acffeb07 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -1047,8 +1047,10 @@ def __init__( ds=None, field_parameters=None, base_object=None, - locals={}, + locals=None, ): + if locals is None: + locals = {} validate_object(data_source, YTSelectionContainer) validate_iterable(conditionals) for condition in conditionals: diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index 3334c9463a1..09b5be30f46 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -318,7 +318,7 @@ def __init__( self, filename, dataset_type="fits", - auxiliary_files=[], + auxiliary_files=None, nprocs=None, storage_filename=None, nan_mask=None, @@ -328,6 +328,8 @@ def __init__( unit_system="cgs", ): + if auxiliary_files is None: + auxiliary_files = [] if parameters is None: parameters = {} parameters["nprocs"] = nprocs @@ -723,7 +725,7 @@ class SpectralCubeFITSDataset(SkyDataFITSDataset): def __init__( self, filename, - auxiliary_files=[], + auxiliary_files=None, nprocs=None, storage_filename=None, nan_mask=None, @@ -734,6 +736,8 @@ def __init__( unit_system="cgs", z_axis_decomp=None, ): + if auxiliary_files is None: + auxiliary_files = [] self.spectral_factor = spectral_factor if z_axis_decomp is not None: issue_deprecation_warning( diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index 448aae60d3a..2821c7f3c86 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -43,8 +43,12 @@ class OpenPMDGrid(AMRGridPatch): pindex = 0 poffset = 0 - def __init__(self, gid, index, level=-1, fi=0, fo=0, pi=0, po=0, ft=[], pt=[]): + def __init__(self, gid, index, level=-1, fi=0, fo=0, pi=0, po=0, ft=None, pt=None): AMRGridPatch.__init__(self, gid, filename=index.index_filename, index=index) + if ft is None: + ft = [] + if pt is None: + pt = [] self.findex = fi self.foffset = fo self.pindex = pi diff --git a/yt/utilities/answer_testing/answer_tests.py b/yt/utilities/answer_testing/answer_tests.py index 7665df50d91..a74ebf17759 100644 --- a/yt/utilities/answer_testing/answer_tests.py +++ b/yt/utilities/answer_testing/answer_tests.py @@ -215,8 +215,10 @@ def plot_window_attribute( attr_args, plot_type="SlicePlot", callback_id="", - callback_runners=[], + 
callback_runners=None, ): + if callback_runners is None: + callback_runners = [] plot = utils._create_plot_window_attribute_plot( ds, plot_type, plot_field, plot_axis, {} ) @@ -240,8 +242,10 @@ def phase_plot_attribute( attr_name, attr_args, plot_type="PhasePlot", - plot_kwargs={}, + plot_kwargs=None, ): + if plot_kwargs is None: + plot_kwargs = {} data_source = ds_fn.all_data() plot = utils._create_phase_plot_attribute_plot( data_source, x_field, y_field, z_field, plot_type, plot_kwargs diff --git a/yt/utilities/lib/cykdtree/plot.py b/yt/utilities/lib/cykdtree/plot.py index 8e7e11ebb90..52b8d8933e9 100644 --- a/yt/utilities/lib/cykdtree/plot.py +++ b/yt/utilities/lib/cykdtree/plot.py @@ -6,17 +6,17 @@ def _plot2D_root( pts=None, txt=None, plotfile=None, - point_kw={}, - box_kw={}, + point_kw=None, + box_kw=None, axs=None, - subplot_kw={}, - gridspec_kw={}, - fig_kw={}, - save_kw={}, + subplot_kw=None, + gridspec_kw=None, + fig_kw=None, + save_kw=None, title=None, xlabel="x", ylabel="y", - label_kw={}, + label_kw=None, ): r"""Plot a 2D kd-tree. @@ -62,6 +62,20 @@ def _plot2D_root( import matplotlib.pyplot as plt from matplotlib.collections import LineCollection + if point_kw is None: + point_kw = {} + if box_kw is None: + box_kw = {} + if subplot_kw is None: + subplot_kw = {} + if gridspec_kw is None: + gridspec_kw = {} + if fig_kw is None: + fig_kw = {} + if save_kw is None: + save_kw = {} + if label_kw is None: + label_kw = {} # Axes creation if axs is None: plt.close("all") diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index 22d0b89a589..40dfac05320 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -667,7 +667,7 @@ def colorbar( log=False, tickcolor=None, orientation="right", - pos=[0, 0], + pos=None, shrink=1.0, ): r"""Places a colorbar adjacent to the current figure. @@ -702,6 +702,9 @@ def colorbar( label="Density [cm$^{-3}$]") >>> d.save_fig() """ + if pos is None: + pos = [0, 0] + if orientation == "right": origin = (pos[0] + self.figsize[0] + 0.5, pos[1]) size = (0.1 * self.figsize[0], self.figsize[1]) @@ -1084,7 +1087,7 @@ def title_box( loc=(0.02, 0.98), halign=pyx.text.halign.left, valign=pyx.text.valign.top, - text_opts=[], + text_opts=None, ): r"""Inserts a box with text in the current figure. @@ -1111,6 +1114,8 @@ def title_box( >>> d.title_box("Halo 1", loc=(0.05,0.95)) >>> d.save_fig() """ + if text_opts is None: + text_opts = [] tbox = self.canvas.text( self.figsize[0] * loc[0], self.figsize[1] * loc[1], @@ -1138,7 +1143,6 @@ def save_fig(self, filename="test", format="eps", resolution=250): -------- >>> d = DualEPS() >>> d.axis_box(xrange=(0,100), yrange=(1e-3,1), ylog=True) - >>> d.save_fig("image1", format="pdf") """ if format == "eps": self.canvas.writeEPSfile(filename) From 8939e4d017276bceff7fd71ed0780f8c1e37146a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 23 Jun 2020 17:33:27 +0200 Subject: [PATCH 113/653] ci: add flake8-bugbear to travis linting --- setup.cfg | 3 +++ tests/lint_requirements.txt | 1 + 2 files changed, 4 insertions(+) diff --git a/setup.cfg b/setup.cfg index f0a4c73e5d0..2d0bd8cbbad 100644 --- a/setup.cfg +++ b/setup.cfg @@ -39,6 +39,9 @@ ignore = E203, # Whitespace before ':' (black compatibility) E741, # Do not use variables named 'I', 'O', or 'l' W503, # Line break occurred before a binary operator (black compatibility) W605, # Invalid escape sequence 'x' + B005, # "Using .strip() with multi-character strings is misleading the reader." 
+ B007, # "Loop control variable not used within the loop body." TODO: handle + B302, # this is a python 3 compatibility warning, not relevant since don't support python 2 anymore jobs=8 diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index 33da678badf..52a3fd3b476 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -4,3 +4,4 @@ pycodestyle==2.6.0 pyflakes==2.2.0 isort~=5.1 black==19.10b0 +flake8-bugbear From d02086364e42c440a5df8bb204db8b98042d7b87 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 13 Jul 2020 14:00:35 -0500 Subject: [PATCH 114/653] adding render option to scene.save() to avoid double rendering --- yt/visualization/volume_rendering/scene.py | 53 +++++++++++++------ .../volume_rendering/volume_rendering.py | 2 +- 2 files changed, 39 insertions(+), 16 deletions(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 7b8df0e4f94..9232209e3df 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -204,6 +204,16 @@ def render(self, camera=None): >>> sc = yt.create_scene(ds) >>> # Modify camera, sources, etc... >>> im = sc.render() + >>> sc.save(sigma_clip=4.0,render=False) + + Altneratively, if you do not need the image array, you can just call + save as follows. + + >>> import yt + >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') + >>> + >>> sc = yt.create_scene(ds) + >>> # Modify camera, sources, etc... >>> sc.save(sigma_clip=4.0) """ @@ -216,13 +226,13 @@ def render(self, camera=None): self._last_render = bmp return bmp - def save(self, fname=None, sigma_clip=None): + def save(self, fname=None, sigma_clip=None, render=True): r"""Saves the most recently rendered image of the Scene to disk. - Once you have created a scene and rendered that scene to an image - array, this saves that image array to disk with an optional filename. - If an image has not yet been rendered for the current scene object, - it forces one and writes it out. + Once you have created a scene, this saves that image array to disk with + an optional filename. This function calls render() to generate an + image array, unless the render parameter is set to False, in which case + the most recently rendered scene is used if it exists. Parameters ---------- @@ -239,6 +249,10 @@ def save(self, fname=None, sigma_clip=None): Default: None floor(vals > std_dev*sigma_clip + mean) + render: boolean, optional + If True, will always render the scene before saving. + If False, will use results of previous render if it exists. + Default: True Returns ------- @@ -252,10 +266,11 @@ def save(self, fname=None, sigma_clip=None): >>> >>> sc = yt.create_scene(ds) >>> # Modify camera, sources, etc... - >>> sc.render() >>> sc.save('test.png', sigma_clip=4) - Or alternatively: + When saving multiple images without modifying the scene (camera, + sources,etc.), render=False can be used to avoid re-rendering. 
+ This is useful for generating images at a range of sigma_clip values: >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') @@ -263,8 +278,8 @@ def save(self, fname=None, sigma_clip=None): >>> sc = yt.create_scene(ds) >>> # save with different sigma clipping values >>> sc.save('raw.png') - >>> sc.save('clipped_2.png', sigma_clip=2) - >>> sc.save('clipped_4.png', sigma_clip=4) + >>> sc.save('clipped_2.png', sigma_clip=2, render=False) + >>> sc.save('clipped_4.png', sigma_clip=4, render=False) """ if fname is None: @@ -287,7 +302,11 @@ def save(self, fname=None, sigma_clip=None): suffix = ".png" fname = "%s%s" % (fname, suffix) - self.render() + # in most cases we want to render every time, but in some cases pulling + # the previous render is desirable (e.g., if only changing sigma_clip or + # saving after a call to sc.show()). + if render or hasattr(self,'_last_render') is False: + self.render() mylog.info("Saving render %s", fname) # We can render pngs natively but for other formats we defer to @@ -320,9 +339,9 @@ def save(self, fname=None, sigma_clip=None): ax.imshow(np.rot90(out), origin="lower") canvas.print_figure(fname, dpi=100) - def save_annotated( - self, fname=None, label_fmt=None, text_annotate=None, dpi=100, sigma_clip=None - ): + def save_annotated(self, fname=None, label_fmt=None, + text_annotate=None, dpi=100, sigma_clip=None, + render=True): r"""Saves the most recently rendered image of the Scene to disk, including an image of the transfer function and and user-defined text. @@ -360,7 +379,10 @@ def save_annotated( function. Each item in the main list is a separate string to write. - + render: boolean, optional + If True, will always render the scene before saving. + If False, will use results of previous render if it exists. + Default: True Returns ------- @@ -407,7 +429,8 @@ def save_annotated( suffix = ".png" fname = "%s%s" % (fname, suffix) - self.render() + if render or hasattr(self,'_last_render') is False: + self.render() # which transfer function? rs = rensources[0] diff --git a/yt/visualization/volume_rendering/volume_rendering.py b/yt/visualization/volume_rendering/volume_rendering.py index d0867183eef..58278ffa375 100644 --- a/yt/visualization/volume_rendering/volume_rendering.py +++ b/yt/visualization/volume_rendering/volume_rendering.py @@ -124,5 +124,5 @@ def volume_render( """ sc = create_scene(data_source, field=field) im = sc.render() - sc.save(fname=fname, sigma_clip=sigma_clip) + sc.save(fname=fname, sigma_clip=sigma_clip, render=False) return im, sc From 2e029ca873633641ea06b3d778a250656e4521bd Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 13 Jul 2020 14:03:33 -0500 Subject: [PATCH 115/653] typo in scene.save() docstring --- yt/visualization/volume_rendering/scene.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 9232209e3df..6faeb2a08cd 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -207,7 +207,7 @@ def render(self, camera=None): >>> sc.save(sigma_clip=4.0,render=False) Altneratively, if you do not need the image array, you can just call - save as follows. + save as follows. >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') @@ -229,7 +229,7 @@ def render(self, camera=None): def save(self, fname=None, sigma_clip=None, render=True): r"""Saves the most recently rendered image of the Scene to disk. 
- Once you have created a scene, this saves that image array to disk with + Once you have created a scene, this saves an image array to disk with an optional filename. This function calls render() to generate an image array, unless the render parameter is set to False, in which case the most recently rendered scene is used if it exists. From 84e8599c71f9ea1894f6e14c6ad0f9f12ad85ad3 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 13 Jul 2020 14:40:33 -0500 Subject: [PATCH 116/653] adding unit test for scene.save() render arg --- .../tests/test_save_render.py | 39 +++++++++++++++++++ 1 file changed, 39 insertions(+) create mode 100644 yt/visualization/volume_rendering/tests/test_save_render.py diff --git a/yt/visualization/volume_rendering/tests/test_save_render.py b/yt/visualization/volume_rendering/tests/test_save_render.py new file mode 100644 index 00000000000..f34223437ad --- /dev/null +++ b/yt/visualization/volume_rendering/tests/test_save_render.py @@ -0,0 +1,39 @@ +import os +import tempfile +import shutil +import yt +from yt.testing import fake_random_ds +from unittest import TestCase + +def setup(): + """Test specific setup.""" + from yt.config import ytcfg + ytcfg["yt", "__withintesting"] = "True" + + +class SaveRenderTest(TestCase): + # This toggles using a temporary directory. Turn off to examine images. + use_tmpdir = True + + def setUp(self): + if self.use_tmpdir: + self.curdir = os.getcwd() + # Perform I/O in safe place instead of yt main dir + self.tmpdir = tempfile.mkdtemp() + os.chdir(self.tmpdir) + else: + self.curdir, self.tmpdir = None, None + + def tearDown(self): + if self.use_tmpdir: + os.chdir(self.curdir) + shutil.rmtree(self.tmpdir) + + def test_save_render(self): + ds = fake_random_ds(32) + sc = yt.create_scene(ds) + sc.save('raw.png') # will use render = True by default + sc.save('clip_2.png', sigma_clip=2, render=False) # will pull render + sc.save('clip_4.png', sigma_clip=4.0, render=False) + + return sc From 22255468758372d67402b1016d5851341e94117d Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Tue, 14 Jul 2020 16:23:19 -0500 Subject: [PATCH 117/653] added note on render keyword to vol render FAQ --- doc/source/visualizing/volume_rendering.rst | 55 +++++++++++++++------ 1 file changed, 40 insertions(+), 15 deletions(-) diff --git a/doc/source/visualizing/volume_rendering.rst b/doc/source/visualizing/volume_rendering.rst index 39192b335df..8b108126026 100644 --- a/doc/source/visualizing/volume_rendering.rst +++ b/doc/source/visualizing/volume_rendering.rst @@ -80,7 +80,7 @@ The scene's most important functions are :meth:`~yt.visualization.volume_rendering.scene.Scene.render` for casting rays through the scene and :meth:`~yt.visualization.volume_rendering.scene.Scene.save` for saving the -resulting rendered image to disk. +resulting rendered image to disk (see note on :ref:`when_to_render`). The easiest way to create a scene with sensible defaults is to use the functions: @@ -329,7 +329,7 @@ To add a single gaussian layer with a color determined by a colormap value, use source.tfh.plot('transfer_function.png', profile_field='density') sc.save('rendering.png', sigma_clip=6) - + add_gaussian """""""""""" @@ -400,7 +400,7 @@ the volume rendering. def linramp(vals, minval, maxval): return (vals - vals.min())/(vals.max() - vals.min()) - tf.map_to_colormap(np.log10(3e-31), np.log10(5e-27), colormap='arbre', + tf.map_to_colormap(np.log10(3e-31), np.log10(5e-27), colormap='arbre', scale_func=linramp) source.tfh.tf = tf @@ -408,7 +408,7 @@ the volume rendering. 
source.tfh.plot('transfer_function.png', profile_field='density') - sc.save('rendering.png', sigma_clip=6) + sc.save('rendering.png', sigma_clip=6) Projection Transfer Function ++++++++++++++++++++++++++++ @@ -522,10 +522,10 @@ adjusts for an opening view angle, so that the scene will have an element of perspective to it. :class:`~yt.visualization.volume_rendering.lens.StereoPerspectiveLens` is identical to PerspectiveLens, but it produces two images from nearby -camera positions for use in 3D viewing. How 3D the image appears at viewing -will depend upon the value of -:attr:`~yt.visualization.volume_rendering.lens.StereoPerspectiveLens.disparity`, -which is half the maximum distance between two corresponding points in the left +camera positions for use in 3D viewing. How 3D the image appears at viewing +will depend upon the value of +:attr:`~yt.visualization.volume_rendering.lens.StereoPerspectiveLens.disparity`, +which is half the maximum distance between two corresponding points in the left and right images. By default, it is set to 3 pixels. @@ -556,11 +556,11 @@ see `the YouTube help: Upload virtual reality videos `_). `This virtual reality video `_ on YouTube is an example produced with -:class:`~yt.visualization.volume_rendering.lens.StereoSphericalLens`. As in -the case of -:class:`~yt.visualization.volume_rendering.lens.StereoPerspectiveLens`, the -difference between the two images can be controlled by changing the value of -:attr:`~yt.visualization.volume_rendering.lens.StereoSphericalLens.disparity` +:class:`~yt.visualization.volume_rendering.lens.StereoSphericalLens`. As in +the case of +:class:`~yt.visualization.volume_rendering.lens.StereoPerspectiveLens`, the +difference between the two images can be controlled by changing the value of +:attr:`~yt.visualization.volume_rendering.lens.StereoSphericalLens.disparity` (See above). .. _annotated-vr-example: @@ -573,7 +573,7 @@ Annotated Examples information can be hard. We've provided information about best practices and tried to make the interface easy to develop nice visualizations, but getting them *just right* is often - time-consuming. It's usually best to start out simple and expand + time-consuming. It's usually best to start out simple and expand and tweak as needed. The scene interface provides a modular interface for creating renderings @@ -653,7 +653,7 @@ function. Example: import numpy as np import yt - + ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") sc = yt.create_scene(ds, 'density') @@ -856,3 +856,28 @@ dark. ``sigma_clip = N`` can address this by removing values that are more than ``N`` standard deviations brighter than the mean of your image. Typically, a choice of 4 to 6 will help dramatically with your resulting image. See the cookbook recipe :ref:`cookbook-sigma_clip` for a demonstration. + +.. _when_to_render: + +When to Render +^^^^^^^^^^^^^^ + +The rendering of a scene is the most computationally demanding step in +creating a final image and there are a number of ways to control at which point +a scene is actually rendered. The default behavior of the +:meth:`~yt.visualization.volume_rendering.scene.Scene.save` function includes +a call to :meth:`~yt.visualization.volume_rendering.scene.Scene.render`. This +means that in most cases (including the above examples), after you set up your +scene and volumes, you can simply call +:meth:`~yt.visualization.volume_rendering.scene.Scene.save` without first +calling :meth:`~yt.visualization.volume_rendering.scene.Scene.render`. 
If you +wish to save the most recently rendered image without rendering again, set +``render=False`` in the call to +:meth:`~yt.visualization.volume_rendering.scene.Scene.save` and the most +recently rendered image array will be used. Cases where you +may wish to use ``render=False`` include saving images at different +``sigma_clip`` values (see :ref:`cookbook-sigma_clip`) or when saving an image +that has already been rendered in a Jupyter notebook using +:meth:`~yt.visualization.volume_rendering.scene.Scene.show`. Changes to the +scene including adding sources, modifying transfer functions or adjusting camera +settings generally require rendering again. From c2d1a9c361e3eb9f6c7751ebbe99aa0d0fbe6fb1 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Tue, 14 Jul 2020 16:43:05 -0500 Subject: [PATCH 118/653] Update yt/visualization/volume_rendering/scene.py Co-authored-by: Madicken Munk --- yt/visualization/volume_rendering/scene.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 6faeb2a08cd..ad56c98ee59 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -269,7 +269,7 @@ def save(self, fname=None, sigma_clip=None, render=True): >>> sc.save('test.png', sigma_clip=4) When saving multiple images without modifying the scene (camera, - sources,etc.), render=False can be used to avoid re-rendering. + sources,etc.), render=False can be used to avoid re-rendering when a scene is saved. This is useful for generating images at a range of sigma_clip values: >>> import yt From 517a6a9fb827148fb771d0d229dd479c76a6a57a Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Tue, 14 Jul 2020 16:43:15 -0500 Subject: [PATCH 119/653] Update yt/visualization/volume_rendering/scene.py Co-authored-by: Madicken Munk --- yt/visualization/volume_rendering/scene.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index ad56c98ee59..2cfda437065 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -277,7 +277,7 @@ def save(self, fname=None, sigma_clip=None, render=True): >>> >>> sc = yt.create_scene(ds) >>> # save with different sigma clipping values - >>> sc.save('raw.png') + >>> sc.save('raw.png') # The initial render call happens here >>> sc.save('clipped_2.png', sigma_clip=2, render=False) >>> sc.save('clipped_4.png', sigma_clip=4, render=False) From d2ffea3e188d85fcefda89ee03f1839b55f823fc Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Tue, 14 Jul 2020 16:43:23 -0500 Subject: [PATCH 120/653] Update yt/visualization/volume_rendering/scene.py Co-authored-by: Madicken Munk --- yt/visualization/volume_rendering/scene.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 2cfda437065..37101504d46 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -380,7 +380,7 @@ def save_annotated(self, fname=None, label_fmt=None, Each item in the main list is a separate string to write. render: boolean, optional - If True, will always render the scene before saving. + If True, will render the scene before saving. If False, will use results of previous render if it exists. 
Default: True From 87fe5627f270e814d0a51214feb764891eedb776 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 20 Jul 2020 08:31:23 -0500 Subject: [PATCH 121/653] Update yt/visualization/volume_rendering/scene.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/visualization/volume_rendering/scene.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 37101504d46..c3702001b07 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -207,7 +207,7 @@ def render(self, camera=None): >>> sc.save(sigma_clip=4.0,render=False) Altneratively, if you do not need the image array, you can just call - save as follows. + ``save`` as follows. >>> import yt >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') From dce4bfd8d6791cca4f0a25b5df54d4598f9fa795 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 20 Jul 2020 08:32:57 -0500 Subject: [PATCH 122/653] Update yt/visualization/volume_rendering/tests/test_save_render.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/visualization/volume_rendering/tests/test_save_render.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/tests/test_save_render.py b/yt/visualization/volume_rendering/tests/test_save_render.py index f34223437ad..4b41a728305 100644 --- a/yt/visualization/volume_rendering/tests/test_save_render.py +++ b/yt/visualization/volume_rendering/tests/test_save_render.py @@ -30,7 +30,7 @@ def tearDown(self): shutil.rmtree(self.tmpdir) def test_save_render(self): - ds = fake_random_ds(32) + ds = fake_random_ds(ndims=32) sc = yt.create_scene(ds) sc.save('raw.png') # will use render = True by default sc.save('clip_2.png', sigma_clip=2, render=False) # will pull render From a16536d4f5458440c00904797f0857af3eb95905 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 20 Jul 2020 16:07:50 -0500 Subject: [PATCH 123/653] improving render check --- .../Volume_Rendering_Tutorial.ipynb | 178 ++++++++++++++---- doc/source/visualizing/volume_rendering.rst | 3 +- yt/visualization/volume_rendering/scene.py | 31 ++- .../tests/test_save_render.py | 21 ++- 4 files changed, 171 insertions(+), 62 deletions(-) diff --git a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb index 74dc7c1b221..d2a8ffc9df3 100644 --- a/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb +++ b/doc/source/visualizing/Volume_Rendering_Tutorial.ipynb @@ -4,14 +4,31 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "This notebook shows how to use the new (in version 3.3) Scene interface to create custom volume renderings. To begin, we load up a dataset and use the yt.create_scene method to set up a basic Scene. We store the Scene in a variable called 'sc' and render the default ('gas', 'density') field." + "# Volume Rendering Tutorial \n", + "\n", + "This notebook shows how to use the new (in version 3.3) Scene interface to create custom volume renderings. The tutorial proceeds in the following steps: \n", + "\n", + "1. [Creating the Scene](#1.-Creating-the-Scene)\n", + "2. [Displaying the Scene](#2.-Displaying-the-Scene)\n", + "3. [Adjusting Transfer Functions](#3.-Adjusting-Transfer-Functions)\n", + "4. [Saving an Image](#4.-Saving-an-Image)\n", + "5. 
[Adding Annotations](#5.-Adding-Annotations)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 1. Creating the Scene \n", + "\n", + "To begin, we load up a dataset and use the `yt.create_scene` method to set up a basic Scene. We store the Scene in a variable called `sc` and render the default `('gas', 'density')` field." ] }, { "cell_type": "code", "execution_count": null, "metadata": { - "collapsed": false + "scrolled": true }, "outputs": [], "source": [ @@ -28,15 +45,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "Note that to render a different field, we would use pass the field name to `yt.create_scene` using the `field` argument. \n", + "\n", "Now we can look at some information about the Scene we just created using the python print keyword:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print (sc)" @@ -52,9 +69,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print (sc.get_source())" @@ -64,15 +79,15 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We can see that the yt.create_source has created a VolumeSource with default values for the center, bounds, and transfer function. Now, let's see what this Scene looks like. In the notebook, we can do this by calling sc.show(). " + "## 2. Displaying the Scene \n", + "\n", + "We can see that the `yt.create_source` method has created a `VolumeSource` with default values for the center, bounds, and transfer function. Now, let's see what this Scene looks like. In the notebook, we can do this by calling `sc.show()`. " ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "sc.show()" @@ -88,9 +103,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "sc.camera.zoom(3.0)" @@ -106,9 +119,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "print (sc)" @@ -118,15 +129,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "To see what this looks like, we re-render the image and display the scene again. Note that we don't actually have to call sc.show() here - we can just have Ipython evaluate the Scene and that will display it automatically." + "To see what this looks like, we re-render the image and display the scene again. Note that we don't actually have to call `sc.show()` here - we can just have Ipython evaluate the Scene and that will display it automatically." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "sc.render()\n", @@ -137,15 +146,13 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "That's better! The image looks a little washed-out though, so we use the sigma_clip argument to sc.show() to improve the contrast:" + "That's better! 
The image looks a little washed-out though, so we use the `sigma_clip` argument to `sc.show()` to improve the contrast:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "sc.show(sigma_clip=4.0)" @@ -155,15 +162,65 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we demonstrate how to change the mapping between the field values and the colors in the image. We use the TransferFunctionHelper to create a new transfer function using the \"gist_rainbow\" colormap, and then re-create the image as follows:" + "Applying different values of `sigma_clip` with `sc.show()` is a relatively fast process because `sc.show()` will pull the most recently rendered image and apply the contrast adjustment without rendering the scene again. While this is useful for quickly testing the affect of different values of `sigma_clip`, it can lead to confusion if we don't remember to render after making changes to the camera. For example, if we zoom in again and simply call `sc.show()`, then we get the same image as before:" ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, + "outputs": [], + "source": [ + "sc.camera.zoom(3.0)\n", + "sc.show(sigma_clip=4.0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "For the change to the camera to take affect, we have to explictly render again: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sc.render()\n", + "sc.show(sigma_clip=4.0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "As a general rule, any changes to the scene itself such as adjusting the camera or changing transfer functions requires rendering again. Before moving on, let's undo the last zoom:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sc.camera.zoom(1./3.0)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 3. Adjusting Transfer Functions\n", + "\n", + "Next, we demonstrate how to change the mapping between the field values and the colors in the image. We use the TransferFunctionHelper to create a new transfer function using the `gist_rainbow` colormap, and then re-create the image as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, "outputs": [], "source": [ "# Set up a custom transfer function using the TransferFunctionHelper. \n", @@ -194,9 +251,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "cam = sc.add_camera(ds, lens_type='perspective')\n", @@ -226,9 +281,7 @@ { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "sc.render()\n", @@ -239,15 +292,51 @@ "cell_type": "markdown", "metadata": {}, "source": [ + "## 4. 
Saving an Image\n", + "\n", + "To save a volume rendering to an image file at any point, we can use `sc.save` as follows:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sc.save('volume_render.png',render=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Including the keyword argument `render=False` indicates that the most recently rendered image will be saved (otherwise, `sc.save()` will trigger a call to `sc.render()`). This behavior differs from `sc.show()`, which always uses the most recently rendered image. \n", + "\n", + "An additional caveat is that if we used `sigma_clip` in our call to `sc.show()`, then we must **also** pass it to `sc.save()` as sigma clipping is applied on top of a rendered image array. In that case, we would do the following: " + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "sc.save('volume_render_clip4.png',sigma_clip=4.0,render=False)" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "## 5. Adding Annotations\n", + "\n", "Finally, the next cell restores the lens and the transfer function to the defaults, moves the camera, and adds an opaque source that shows the axes of the simulation coordinate system." ] }, { "cell_type": "code", "execution_count": null, - "metadata": { - "collapsed": false - }, + "metadata": {}, "outputs": [], "source": [ "# set the lens type back to plane-parallel\n", @@ -263,6 +352,13 @@ "sc.render()\n", "sc.show(sigma_clip=4.0)" ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] } ], "metadata": { @@ -281,9 +377,9 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.5.1" + "version": "3.8.3" } }, "nbformat": 4, - "nbformat_minor": 0 + "nbformat_minor": 1 } diff --git a/doc/source/visualizing/volume_rendering.rst b/doc/source/visualizing/volume_rendering.rst index 8b108126026..d4d331ede01 100644 --- a/doc/source/visualizing/volume_rendering.rst +++ b/doc/source/visualizing/volume_rendering.rst @@ -873,8 +873,7 @@ scene and volumes, you can simply call calling :meth:`~yt.visualization.volume_rendering.scene.Scene.render`. If you wish to save the most recently rendered image without rendering again, set ``render=False`` in the call to -:meth:`~yt.visualization.volume_rendering.scene.Scene.save` and the most -recently rendered image array will be used. Cases where you +:meth:`~yt.visualization.volume_rendering.scene.Scene.save`. Cases where you may wish to use ``render=False`` include saving images at different ``sigma_clip`` values (see :ref:`cookbook-sigma_clip`) or when saving an image that has already been rendered in a Jupyter notebook using diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index c3702001b07..47a97505ec9 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -226,8 +226,25 @@ def render(self, camera=None): self._last_render = bmp return bmp + def _check_render(self, fname, render=True): + # checks for existing render before saving, in most cases we want to + # render every time, but in some cases pulling the previous render is + # desirable (e.g., if only changing sigma_clip or + # saving after a call to sc.show()). 
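+        # Decision sketch: no previous render -> warn and render anyway;
+        # render=True -> re-render (overwriting the previous image);
+        # render=False with an existing image -> reuse self._last_render.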
+ if self._last_render is None: + mylog.warning("No previous rendered image found, rendering now and saving to %s", fname) + render = True + elif render: + mylog.info("Overwriting previous rendered image with new rendering, saving to %s", fname) + else: + mylog.info("Saving most recently rendered image to %s.",fname) + + + if render: + self.render() + def save(self, fname=None, sigma_clip=None, render=True): - r"""Saves the most recently rendered image of the Scene to disk. + r"""Saves a rendered image of the Scene to disk. Once you have created a scene, this saves an image array to disk with an optional filename. This function calls render() to generate an @@ -302,13 +319,8 @@ def save(self, fname=None, sigma_clip=None, render=True): suffix = ".png" fname = "%s%s" % (fname, suffix) - # in most cases we want to render every time, but in some cases pulling - # the previous render is desirable (e.g., if only changing sigma_clip or - # saving after a call to sc.show()). - if render or hasattr(self,'_last_render') is False: - self.render() - - mylog.info("Saving render %s", fname) + self._check_render(fname, render) + # We can render pngs natively but for other formats we defer to # matplotlib. if suffix == ".png": @@ -429,8 +441,7 @@ def save_annotated(self, fname=None, label_fmt=None, suffix = ".png" fname = "%s%s" % (fname, suffix) - if render or hasattr(self,'_last_render') is False: - self.render() + self._check_render(fname, render) # which transfer function? rs = rensources[0] diff --git a/yt/visualization/volume_rendering/tests/test_save_render.py b/yt/visualization/volume_rendering/tests/test_save_render.py index 4b41a728305..1cf84f650db 100644 --- a/yt/visualization/volume_rendering/tests/test_save_render.py +++ b/yt/visualization/volume_rendering/tests/test_save_render.py @@ -14,26 +14,29 @@ def setup(): class SaveRenderTest(TestCase): # This toggles using a temporary directory. Turn off to examine images. 
use_tmpdir = True + tmpdir = './' def setUp(self): if self.use_tmpdir: - self.curdir = os.getcwd() - # Perform I/O in safe place instead of yt main dir + tempfile.mkdtemp() self.tmpdir = tempfile.mkdtemp() - os.chdir(self.tmpdir) - else: - self.curdir, self.tmpdir = None, None def tearDown(self): if self.use_tmpdir: - os.chdir(self.curdir) shutil.rmtree(self.tmpdir) def test_save_render(self): ds = fake_random_ds(ndims=32) sc = yt.create_scene(ds) - sc.save('raw.png') # will use render = True by default - sc.save('clip_2.png', sigma_clip=2, render=False) # will pull render - sc.save('clip_4.png', sigma_clip=4.0, render=False) + + # make sure it renders if nothing exists, even if render = False + sc.save(os.path.join(self.tmpdir,'raw.png'), render = False) + # make sure it re-renders + sc.save(os.path.join(self.tmpdir,'raw_2.png'), render = True) + # make sure sigma clip does not re-render + sc.save(os.path.join(self.tmpdir,'clip_2.png'), sigma_clip=2., + render=False) + sc.save(os.path.join(self.tmpdir,'clip_4.png'), sigma_clip=4., + render=False) return sc From ba6ef8063c8aa556aea5c39bbb74e7247530f879 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 20 Jul 2020 16:30:02 -0500 Subject: [PATCH 124/653] whitespace fixes --- yt/visualization/volume_rendering/scene.py | 21 +++++++++---------- .../tests/test_save_render.py | 18 ++++++++-------- 2 files changed, 19 insertions(+), 20 deletions(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 47a97505ec9..cd344c53927 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -226,9 +226,9 @@ def render(self, camera=None): self._last_render = bmp return bmp - def _check_render(self, fname, render=True): - # checks for existing render before saving, in most cases we want to - # render every time, but in some cases pulling the previous render is + def _check_render(self, fname, render=True): + # checks for existing render before saving, in most cases we want to + # render every time, but in some cases pulling the previous render is # desirable (e.g., if only changing sigma_clip or # saving after a call to sc.show()). if self._last_render is None: @@ -236,13 +236,12 @@ def _check_render(self, fname, render=True): render = True elif render: mylog.info("Overwriting previous rendered image with new rendering, saving to %s", fname) - else: - mylog.info("Saving most recently rendered image to %s.",fname) - - - if render: + else: + mylog.info("Saving most recently rendered image to %s.",fname) + + if render: self.render() - + def save(self, fname=None, sigma_clip=None, render=True): r"""Saves a rendered image of the Scene to disk. @@ -319,8 +318,8 @@ def save(self, fname=None, sigma_clip=None, render=True): suffix = ".png" fname = "%s%s" % (fname, suffix) - self._check_render(fname, render) - + self._check_render(fname, render) + # We can render pngs natively but for other formats we defer to # matplotlib. 
if suffix == ".png": diff --git a/yt/visualization/volume_rendering/tests/test_save_render.py b/yt/visualization/volume_rendering/tests/test_save_render.py index 1cf84f650db..e5876617a1a 100644 --- a/yt/visualization/volume_rendering/tests/test_save_render.py +++ b/yt/visualization/volume_rendering/tests/test_save_render.py @@ -29,14 +29,14 @@ def test_save_render(self): ds = fake_random_ds(ndims=32) sc = yt.create_scene(ds) - # make sure it renders if nothing exists, even if render = False - sc.save(os.path.join(self.tmpdir,'raw.png'), render = False) - # make sure it re-renders - sc.save(os.path.join(self.tmpdir,'raw_2.png'), render = True) - # make sure sigma clip does not re-render - sc.save(os.path.join(self.tmpdir,'clip_2.png'), sigma_clip=2., - render=False) - sc.save(os.path.join(self.tmpdir,'clip_4.png'), sigma_clip=4., - render=False) + # make sure it renders if nothing exists, even if render = False + sc.save(os.path.join(self.tmpdir,'raw.png'), render=False) + # make sure it re-renders + sc.save(os.path.join(self.tmpdir,'raw_2.png'), render=True) + # make sure sigma clip does not re-render + sc.save(os.path.join(self.tmpdir,'clip_2.png'), sigma_clip=2., + render=False) + sc.save(os.path.join(self.tmpdir,'clip_4.png'), sigma_clip=4., + render=False) return sc From 903e065e95c8c0dea4dc33bee7e195a9b96b9002 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 20 Jul 2020 17:03:33 -0500 Subject: [PATCH 125/653] black formatting fixes --- yt/visualization/volume_rendering/scene.py | 24 ++++++++++++++----- .../tests/test_save_render.py | 14 +++++------ 2 files changed, 25 insertions(+), 13 deletions(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index cd344c53927..fe3538d789c 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -232,12 +232,18 @@ def _check_render(self, fname, render=True): # desirable (e.g., if only changing sigma_clip or # saving after a call to sc.show()). if self._last_render is None: - mylog.warning("No previous rendered image found, rendering now and saving to %s", fname) + mylog.warning( + "No previous rendered image found, rendering now and saving to %s", + fname, + ) render = True elif render: - mylog.info("Overwriting previous rendered image with new rendering, saving to %s", fname) + mylog.info( + "Overwriting previous rendered image with new rendering, saving to %s", + fname, + ) else: - mylog.info("Saving most recently rendered image to %s.",fname) + mylog.info("Saving most recently rendered image to %s.", fname) if render: self.render() @@ -350,9 +356,15 @@ def save(self, fname=None, sigma_clip=None, render=True): ax.imshow(np.rot90(out), origin="lower") canvas.print_figure(fname, dpi=100) - def save_annotated(self, fname=None, label_fmt=None, - text_annotate=None, dpi=100, sigma_clip=None, - render=True): + def save_annotated( + self, + fname=None, + label_fmt=None, + text_annotate=None, + dpi=100, + sigma_clip=None, + render=True, + ): r"""Saves the most recently rendered image of the Scene to disk, including an image of the transfer function and and user-defined text. 
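As an aside, the reformatted ``save_annotated`` signature above keeps the ``render``
keyword added earlier in this series; a minimal usage sketch (assuming the
``IsolatedGalaxy`` sample dataset and a hypothetical output name ``annotated.png``):

    import yt

    ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
    sc = yt.create_scene(ds, "density")
    sc.render()
    # reuse the image rendered above instead of rendering a second time
    sc.save_annotated("annotated.png", sigma_clip=4.0, render=False)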
diff --git a/yt/visualization/volume_rendering/tests/test_save_render.py b/yt/visualization/volume_rendering/tests/test_save_render.py index e5876617a1a..0ce47a5f9d4 100644 --- a/yt/visualization/volume_rendering/tests/test_save_render.py +++ b/yt/visualization/volume_rendering/tests/test_save_render.py @@ -5,16 +5,18 @@ from yt.testing import fake_random_ds from unittest import TestCase + def setup(): """Test specific setup.""" from yt.config import ytcfg + ytcfg["yt", "__withintesting"] = "True" class SaveRenderTest(TestCase): # This toggles using a temporary directory. Turn off to examine images. use_tmpdir = True - tmpdir = './' + tmpdir = "./" def setUp(self): if self.use_tmpdir: @@ -30,13 +32,11 @@ def test_save_render(self): sc = yt.create_scene(ds) # make sure it renders if nothing exists, even if render = False - sc.save(os.path.join(self.tmpdir,'raw.png'), render=False) + sc.save(os.path.join(self.tmpdir, "raw.png"), render=False) # make sure it re-renders - sc.save(os.path.join(self.tmpdir,'raw_2.png'), render=True) + sc.save(os.path.join(self.tmpdir, "raw_2.png"), render=True) # make sure sigma clip does not re-render - sc.save(os.path.join(self.tmpdir,'clip_2.png'), sigma_clip=2., - render=False) - sc.save(os.path.join(self.tmpdir,'clip_4.png'), sigma_clip=4., - render=False) + sc.save(os.path.join(self.tmpdir, "clip_2.png"), sigma_clip=2.0, render=False) + sc.save(os.path.join(self.tmpdir, "clip_4.png"), sigma_clip=4.0, render=False) return sc From 0d5524f7007d6a73881b1fd9c5211507991053ad Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Mon, 20 Jul 2020 17:26:23 -0500 Subject: [PATCH 126/653] rename _check_render to _render_if_missing --- yt/visualization/volume_rendering/scene.py | 22 +++++++++------------- 1 file changed, 9 insertions(+), 13 deletions(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index fe3538d789c..203560f242d 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -226,24 +226,18 @@ def render(self, camera=None): self._last_render = bmp return bmp - def _check_render(self, fname, render=True): - # checks for existing render before saving, in most cases we want to + def _render_if_missing(self, render=True): + # checks for existing render before saving, in most cases we want to # render every time, but in some cases pulling the previous render is # desirable (e.g., if only changing sigma_clip or # saving after a call to sc.show()). if self._last_render is None: - mylog.warning( - "No previous rendered image found, rendering now and saving to %s", - fname, - ) + mylog.warning("No previous rendered image found, rendering now.") render = True elif render: - mylog.info( - "Overwriting previous rendered image with new rendering, saving to %s", - fname, - ) + mylog.info("Overwriting previous rendered image with new rendering.") else: - mylog.info("Saving most recently rendered image to %s.", fname) + mylog.info("Found previous rendered image to save.") if render: self.render() @@ -324,7 +318,8 @@ def save(self, fname=None, sigma_clip=None, render=True): suffix = ".png" fname = "%s%s" % (fname, suffix) - self._check_render(fname, render) + self._render_if_missing(render) + mylog.info("Saving rendered image to %s", fname) # We can render pngs natively but for other formats we defer to # matplotlib. 
@@ -452,7 +447,8 @@ def save_annotated( suffix = ".png" fname = "%s%s" % (fname, suffix) - self._check_render(fname, render) + self._render_if_missing(fname, render) + mylog.info("Saving rendered image to %s", fname) # which transfer function? rs = rensources[0] From dd1e107fa6786375a0011f2c35fd8404cc1aeae7 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Tue, 21 Jul 2020 08:08:18 -0500 Subject: [PATCH 127/653] changed render check to _sanitize_render --- yt/visualization/volume_rendering/scene.py | 22 ++++++++++++++-------- 1 file changed, 14 insertions(+), 8 deletions(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 203560f242d..c3822c422a6 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -226,21 +226,23 @@ def render(self, camera=None): self._last_render = bmp return bmp - def _render_if_missing(self, render=True): + def _sanitize_render(self, render=True): # checks for existing render before saving, in most cases we want to # render every time, but in some cases pulling the previous render is # desirable (e.g., if only changing sigma_clip or # saving after a call to sc.show()). if self._last_render is None: - mylog.warning("No previous rendered image found, rendering now.") + mylog.warning("No previously rendered image found, rendering now.") render = True elif render: - mylog.info("Overwriting previous rendered image with new rendering.") + mylog.warning( + "Previously rendered image exists, but rendering anyway. " + "Supply 'render=False' to save previously rendered image directly." + ) else: - mylog.info("Found previous rendered image to save.") + mylog.info("Found previously rendered image to save.") - if render: - self.render() + return render def save(self, fname=None, sigma_clip=None, render=True): r"""Saves a rendered image of the Scene to disk. @@ -318,7 +320,9 @@ def save(self, fname=None, sigma_clip=None, render=True): suffix = ".png" fname = "%s%s" % (fname, suffix) - self._render_if_missing(render) + render = self._sanitize_render(render) + if render: + self.render() mylog.info("Saving rendered image to %s", fname) # We can render pngs natively but for other formats we defer to @@ -447,7 +451,9 @@ def save_annotated( suffix = ".png" fname = "%s%s" % (fname, suffix) - self._render_if_missing(fname, render) + render = self._sanitize_render(render) + if render: + self.render() mylog.info("Saving rendered image to %s", fname) # which transfer function? 
From df31ec6e15aeed840e4664d8a2dc0005d1aea01f Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Wed, 22 Jul 2020 10:03:22 -0500 Subject: [PATCH 128/653] remove the redner default in _sanitize_render --- yt/visualization/volume_rendering/scene.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index c3822c422a6..27274eb0b87 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -226,7 +226,7 @@ def render(self, camera=None): self._last_render = bmp return bmp - def _sanitize_render(self, render=True): + def _sanitize_render(self, render): # checks for existing render before saving, in most cases we want to # render every time, but in some cases pulling the previous render is # desirable (e.g., if only changing sigma_clip or From 276d68132f2bbb3f26fb2cf6d662048546333f85 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Wed, 22 Jul 2020 14:30:37 -0500 Subject: [PATCH 129/653] isort fix --- yt/visualization/volume_rendering/tests/test_save_render.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/visualization/volume_rendering/tests/test_save_render.py b/yt/visualization/volume_rendering/tests/test_save_render.py index 0ce47a5f9d4..9d198c8bbce 100644 --- a/yt/visualization/volume_rendering/tests/test_save_render.py +++ b/yt/visualization/volume_rendering/tests/test_save_render.py @@ -1,9 +1,10 @@ import os -import tempfile import shutil +import tempfile +from unittest import TestCase + import yt from yt.testing import fake_random_ds -from unittest import TestCase def setup(): From 17a7fbb469a9d0326220508413f9ad105d6292e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 12 Jun 2020 21:09:12 +0200 Subject: [PATCH 130/653] refactor: do not log a warning when sampling_type is left to default value in add_field methods, raise errors on invalid values --- yt/data_objects/static_output.py | 30 +++---------- yt/fields/field_info_container.py | 72 +++++++++++++++++++++++-------- yt/fields/local_fields.py | 53 +++++++++++------------ 3 files changed, 85 insertions(+), 70 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 5b32c9a7b27..bd630f8b36a 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -3,7 +3,6 @@ import os import pickle import time -import warnings import weakref from collections import defaultdict from stat import ST_CTIME @@ -1060,7 +1059,7 @@ def _assign_unit_system(self, unit_system): self.unit_registry.unit_system = self.unit_system def _create_unit_registry(self, unit_system): - import yt.units.dimensions as dimensions + from yt.units import dimensions as dimensions # yt assumes a CGS unit system by default (for back compat reasons). # Since unyt is MKS by default we specify the MKS values of the base @@ -1315,7 +1314,7 @@ def quan(self): self._quan = functools.partial(YTQuantity, registry=self.unit_registry) return self._quan - def add_field(self, name, function=None, sampling_type=None, **kwargs): + def add_field(self, name, function, sampling_type, **kwargs): """ Dataset-specific call to add_field @@ -1332,6 +1331,8 @@ def add_field(self, name, function=None, sampling_type=None, **kwargs): function : callable A function handle that defines the field. Should accept arguments (field, data) + sampling_type: str + "cell" or "particle" or "local" units : str A plain text string encoding the unit. 
Powers must be in python syntax (** instead of ^). @@ -1339,8 +1340,6 @@ def add_field(self, name, function=None, sampling_type=None, **kwargs): Describes whether the field should be logged validators : list A list of :class:`FieldValidator` objects - particle_type : bool - Is this a particle (1D) field? vector_field : bool Describes the dimensionality of the field. Currently unused. display_name : str @@ -1359,27 +1358,12 @@ def add_field(self, name, function=None, sampling_type=None, **kwargs): ) # Handle the case where the field has already been added. if not override and name in self.field_info: - mylog.error( + mylog.warning( "Field %s already exists. To override use " + "force_override=True.", name, ) - if kwargs.setdefault("particle_type", False): - if sampling_type is not None and sampling_type != "particle": - raise RuntimeError( - "Clashing definition of 'sampling_type' and " - "'particle_type'. Note that 'particle_type' is " - "deprecated. Please just use 'sampling_type'." - ) - else: - sampling_type = "particle" - if sampling_type is None: - warnings.warn( - "Because 'sampling_type' not specified, yt will " - "assume a cell 'sampling_type'", - stacklevel=2, - ) - sampling_type = "cell" - self.field_info.add_field(name, sampling_type, function=function, **kwargs) + + self.field_info.add_field(name, function, sampling_type, **kwargs) self.field_info._show_field_errors.append(name) deps, _ = self.field_info.check_derived_fields([name]) self.field_dependencies.update(deps) diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index f22cf8031af..d55dc70a7e1 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -1,9 +1,8 @@ -import warnings from numbers import Number as numeric_type import numpy as np -from yt.funcs import mylog, only_on_root +from yt.funcs import issue_deprecation_warning, mylog, only_on_root from yt.geometry.geometry_handler import is_curvilinear from yt.units.dimensions import dimensionless from yt.units.unit_object import Unit @@ -247,7 +246,50 @@ def setup_fluid_aliases(self, ftype="gas"): alias = "%s_%s" % (alias[:-2], axis_names[2]) self.alias((ftype, alias), field) - def add_field(self, name, sampling_type, function=None, **kwargs): + @staticmethod + def _sanitize_sampling_type(sampling_type, particle_type=None): + """Detect conflicts between deprecated and new parameters to specify the + sampling type in a new field. + + This is a helper function to add_field methods. + + Parameters + ---------- + sampling_type: str + One of "cell", "particle" or "local" (case insensitive) + particle_type: str + This is a deprecated argument of the add_field method, + which was replaced by sampling_type. + + Raises + ------ + ValueError + For unsupported values in sampling_type + RuntimeError + If conflicting parameters are passed. + """ + try: + sampling_type = sampling_type.lower() + except AttributeError as e: + raise TypeError("sampling_type should be a string.") from e + + if sampling_type not in ("cell", "particle", "local"): + raise ValueError + + if particle_type: + issue_deprecation_warning( + "'particle_type' keyword argument is deprecated in favour " + "of the positional argument 'sampling_type'." + ) + if sampling_type != "particle": + raise RuntimeError( + "Conflicting values for parameters " + "'sampling_type' and 'particle_type'." 
+ ) + + return sampling_type + + def add_field(self, name, function, sampling_type, **kwargs): """ Add a new field, along with supplemental metadata, to the list of available fields. This respects a number of arguments, all of which @@ -262,6 +304,8 @@ def add_field(self, name, sampling_type, function=None, **kwargs): function : callable A function handle that defines the field. Should accept arguments (field, data) + sampling_type: str + "cell" or "particle" or "local" units : str A plain text string encoding the unit. Powers must be in python syntax (** instead of ^). If set to "auto" the units @@ -270,8 +314,6 @@ def add_field(self, name, sampling_type, function=None, **kwargs): Describes whether the field should be logged validators : list A list of :class:`FieldValidator` objects - particle_type : bool - Is this a particle (1D) field? vector_field : bool Describes the dimensionality of the field. Currently unused. display_name : str @@ -290,8 +332,8 @@ def create_function(f): return create_function return # add_field can be used in two different ways: it can be called - # directly, or used as a decorator. If called directly, the - # function will be passed in as an argument, and we simply create + # directly, or used as a decorator (as yt.derived_field). If called directly, + # the function will be passed in as an argument, and we simply create # the derived field and exit. If used as a decorator, function will # be None. In that case, we return a function that will be applied # to the function that the decorator is applied to. @@ -308,19 +350,11 @@ def create_function(f): self[name] = DerivedField(name, sampling_type, function, **kwargs) return - particle_field = False - if sampling_type == "particle": - particle_field = True - - if kwargs.get("particle_type", False): - warnings.warn( - "The particle_type keyword argument of add_field has been " - 'deprecated. Please set sampling_type="particle" instead.', - stacklevel=2, - ) - particle_field = True + sampling_type = self._sanitize_sampling_type( + sampling_type, particle_type=kwargs.get("particle_type") + ) - if particle_field: + if sampling_type == "particle": ftype = "all" else: ftype = self.ds.default_fluid_type diff --git a/yt/fields/local_fields.py b/yt/fields/local_fields.py index a3ca48871c5..644a1e1289b 100644 --- a/yt/fields/local_fields.py +++ b/yt/fields/local_fields.py @@ -1,5 +1,4 @@ -import warnings - +from yt.funcs import iterable from yt.utilities.logger import ytLogger as mylog from .field_info_container import FieldInfoContainer @@ -7,44 +6,42 @@ class LocalFieldInfoContainer(FieldInfoContainer): - def add_field(self, name, function=None, sampling_type=None, **kwargs): - if not isinstance(name, tuple): - if kwargs.setdefault("particle_type", False): - name = ("all", name) + def add_field(self, name, function, sampling_type, **kwargs): + + sampling_type = self._sanitize_sampling_type( + sampling_type, kwargs.get("particle_type") + ) + + if isinstance(name, str) or not iterable(name): + if sampling_type == "particle": + ftype = "all" else: - name = ("gas", name) + ftype = "gas" + name = (ftype, name) + override = kwargs.get("force_override", False) # Handle the case where the field has already been added. if not override and name in self: - mylog.error( - "Field %s already exists. To override use " + "force_override=True.", - name, + mylog.warning( + "Field %s already exists. 
To override use force_override=True.", name, ) - if kwargs.setdefault("particle_type", False): - if sampling_type is not None and sampling_type != "particle": - raise RuntimeError( - "Clashing definition of 'sampling_type' and " - "'particle_type'. Note that 'particle_type' is " - "deprecated. Please just use 'sampling_type'." - ) - else: - sampling_type = "particle" - if sampling_type is None: - warnings.warn( - "Because 'sampling_type' is not specified, yt will " - "assume a 'cell' sampling_type for the %s field" % (name,), - stacklevel=3, - ) - sampling_type = "cell" + return super(LocalFieldInfoContainer, self).add_field( - name, sampling_type, function, **kwargs + name, function, sampling_type, **kwargs ) # Empty FieldInfoContainer local_fields = LocalFieldInfoContainer(None, [], None) -add_field = derived_field = local_fields.add_field +# we define two handles, pointing to the same function but documented differently +# yt.add_field() is meant to be used directly, while yt.derived_field is documented +# as a decorator. +add_field = local_fields.add_field + + +def derived_field(name, sampling_type, **kwargs): + return add_field(name=name, function=None, sampling_type=sampling_type, **kwargs) @register_field_plugin From 68771572abfdbffa77a0f09ed0ce284894462351 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Tue, 16 Jun 2020 15:25:36 -0400 Subject: [PATCH 131/653] A beginning stab (not working) at creating and fetching new hsmls --- yt/frontends/gadget/data_structures.py | 6 ++++ yt/frontends/gadget/io.py | 41 +++++++++++++++++++++++++- 2 files changed, 46 insertions(+), 1 deletion(-) diff --git a/yt/frontends/gadget/data_structures.py b/yt/frontends/gadget/data_structures.py index 15ce3ff6335..119470891cf 100644 --- a/yt/frontends/gadget/data_structures.py +++ b/yt/frontends/gadget/data_structures.py @@ -578,6 +578,7 @@ def __init__( bounding_box=bounding_box, unit_system=unit_system, ) + self._check_hsml() def _get_hvals(self): handle = h5py.File(self.parameter_filename, mode="r") @@ -596,6 +597,11 @@ def _get_uvals(self): handle.close() return uvals + def _check_hsml(self): + handle = h5py.File(self.parameter_filename, mode="r") + self.gen_hsmls = "SmoothingLength" not in handle[self._sph_ptypes[0]] + handle.close() + def _set_owls_eagle(self): self.dimensionality = 3 diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 54fad669fd9..eaba56fd665 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -3,6 +3,8 @@ import numpy as np from yt.frontends.sph.io import IOHandlerSPH +from yt.utilities.lib.particle_kdtree_tools import \ + generate_smoothing_length from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5py @@ -84,11 +86,48 @@ def _yield_coordinates(self, data_file, needed_ptype=None): yield key, pos f.close() + def _generate_smoothing_length(self, data_files, kdtree): + if not self.ds.gen_hsmls: + return + # TODO: We need to do something like this here but I'm not sure how + """ + if os.path.exists(self.hsml_filename): + with open(self.hsml_filename, 'rb') as f: + file_hash = struct.unpack('q', f.read(struct.calcsize('q')))[0] + if file_hash != self.ds._file_hash: + os.remove(self.hsml_filename) + else: + return + """ + positions = [] + for data_file in data_files: + for _, ppos in self._yield_coordinates( + data_file, needed_ptype=self.ds._sph_ptypes[0]): + positions.append(ppos) + if len(positions) == 0: + return + positions = np.concatenate(positions)[kdtree.idx] + hsml = 
generate_smoothing_length( + positions, kdtree, self.ds._num_neighbors) + dtype = positions.dtype + hsml = hsml[np.argsort(kdtree.idx)].astype(dtype) + for i, data_file in enumerate(data_files): + si, ei = data_file.start, data_file.end + hsml_fn = data_file.filename+".hsml" + with h5py.File(hsml_fn, mode='a') as f: + g = f.require_group(self.ds._sph_ptypes[0]) + d = g.require_dataset("SmoothingLength", shape=(,), dtype=dtype) + d[si:ei] = hsml + def _get_smoothing_length(self, data_file, position_dtype, position_shape): ptype = self.ds._sph_ptypes[0] ind = int(ptype[-1]) si, ei = data_file.start, data_file.end - with h5py.File(data_file.filename, "r") as f: + if self.ds.gen_hsmls: + fn = data_file.filename+".hsml" + else: + fn = data_file.filename + with h5py.File(fn, mode="r") as f: pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") pcount = np.clip(pcount - si, 0, ei - si) ds = f[ptype]["SmoothingLength"][si:ei, ...] From a3faedfadf0506c2f3793f846df9200e70dbad6b Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Tue, 16 Jun 2020 16:50:28 -0400 Subject: [PATCH 132/653] Fixing some issues but still no dice --- yt/frontends/gadget/io.py | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index eaba56fd665..12eac998833 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -1,4 +1,5 @@ import os +from collections import defaultdict import numpy as np @@ -100,24 +101,33 @@ def _generate_smoothing_length(self, data_files, kdtree): return """ positions = [] + counts = defaultdict(int) for data_file in data_files: for _, ppos in self._yield_coordinates( data_file, needed_ptype=self.ds._sph_ptypes[0]): + counts[data_file.filename] += ppos.shape[0] positions.append(ppos) if len(positions) == 0: return + offsets = {} + offset = 0 + for fn in counts: + offsets[fn] = offset + offset += counts[fn] positions = np.concatenate(positions)[kdtree.idx] hsml = generate_smoothing_length( positions, kdtree, self.ds._num_neighbors) dtype = positions.dtype hsml = hsml[np.argsort(kdtree.idx)].astype(dtype) - for i, data_file in enumerate(data_files): + for data_file in data_files: si, ei = data_file.start, data_file.end + fn = data_file.filename hsml_fn = data_file.filename+".hsml" - with h5py.File(hsml_fn, mode='a') as f: + with h5py.File(hsml_fn, mode='a') as f: g = f.require_group(self.ds._sph_ptypes[0]) - d = g.require_dataset("SmoothingLength", shape=(,), dtype=dtype) - d[si:ei] = hsml + d = g.require_dataset("SmoothingLength", dtype=dtype, + shape=(counts[fn],)) + d[si:ei] = hsml[si+offsets[fn]:ei+offsets[fn]] def _get_smoothing_length(self, data_file, position_dtype, position_shape): ptype = self.ds._sph_ptypes[0] From b2a134e863885b3934e467554704dcebe673711a Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Tue, 16 Jun 2020 22:13:45 -0400 Subject: [PATCH 133/653] Everything is working now except the bit that prevents you from rewriting the files --- yt/frontends/arepo/fields.py | 34 ++++++++++++-------------- yt/frontends/arepo/io.py | 20 +++++++-------- yt/frontends/eagle/fields.py | 9 +++---- yt/frontends/gadget/data_structures.py | 8 ++---- yt/frontends/gadget/fields.py | 7 ++++++ yt/frontends/gadget/io.py | 12 +++++---- yt/frontends/owls/fields.py | 7 ++++-- 7 files changed, 50 insertions(+), 47 deletions(-) diff --git a/yt/frontends/arepo/fields.py b/yt/frontends/arepo/fields.py index d06f05ddc72..6ce0426ba5a 100644 --- a/yt/frontends/arepo/fields.py +++ 
b/yt/frontends/arepo/fields.py @@ -7,24 +7,22 @@ class ArepoFieldInfo(GadgetFieldInfo): - known_particle_fields = GadgetFieldInfo.known_particle_fields + ( - ("smoothing_length", ("code_length", [], None)), - ("MagneticField", ("code_magnetic", ["particle_magnetic_field"], None)), - ( - "MagneticFieldDivergence", - ("code_magnetic/code_length", ["magnetic_field_divergence"], None), - ), - ("GFM_Metallicity", ("", ["metallicity"], None)), - ("GFM_Metals_00", ("", ["H_fraction"], None)), - ("GFM_Metals_01", ("", ["He_fraction"], None)), - ("GFM_Metals_02", ("", ["C_fraction"], None)), - ("GFM_Metals_03", ("", ["N_fraction"], None)), - ("GFM_Metals_04", ("", ["O_fraction"], None)), - ("GFM_Metals_05", ("", ["Ne_fraction"], None)), - ("GFM_Metals_06", ("", ["Mg_fraction"], None)), - ("GFM_Metals_07", ("", ["Si_fraction"], None)), - ("GFM_Metals_08", ("", ["Fe_fraction"], None)), - ) + known_particle_fields = GadgetFieldInfo.known_particle_fields + \ + (("MagneticField", + ("code_magnetic", ["particle_magnetic_field"], None)), + ("MagneticFieldDivergence", + ("code_magnetic/code_length", ["magnetic_field_divergence"], None)), + ("GFM_Metallicity", ("", ["metallicity"], None)), + ("GFM_Metals_00", ("", ["H_fraction"], None)), + ("GFM_Metals_01", ("", ["He_fraction"], None)), + ("GFM_Metals_02", ("", ["C_fraction"], None)), + ("GFM_Metals_03", ("", ["N_fraction"], None)), + ("GFM_Metals_04", ("", ["O_fraction"], None)), + ("GFM_Metals_05", ("", ["Ne_fraction"], None)), + ("GFM_Metals_06", ("", ["Mg_fraction"], None)), + ("GFM_Metals_07", ("", ["Si_fraction"], None)), + ("GFM_Metals_08", ("", ["Fe_fraction"], None)), + ) def __init__(self, ds, field_list, slice_info=None): if ds.cosmological_simulation: diff --git a/yt/frontends/arepo/io.py b/yt/frontends/arepo/io.py index 549d7f4a8db..dfcd8e170a8 100644 --- a/yt/frontends/arepo/io.py +++ b/yt/frontends/arepo/io.py @@ -1,12 +1,14 @@ -import numpy as np - from yt.frontends.gadget.api import IOHandlerGadgetHDF5 +import numpy as np from yt.utilities.on_demand_imports import _h5py as h5py - class IOHandlerArepoHDF5(IOHandlerGadgetHDF5): _dataset_type = "arepo_hdf5" + def _generate_smoothing_length(self, data_files, kdtree): + # This is handled below in _get_smoothing_length + return + def _get_smoothing_length(self, data_file, position_dtype, position_shape): ptype = self.ds._sph_ptypes[0] ind = int(ptype[-1]) @@ -18,16 +20,12 @@ def _get_smoothing_length(self, data_file, position_dtype, position_shape): # we compute one here by finding the radius of the sphere # corresponding to the volume of the Voroni cell and multiplying # by a user-configurable smoothing factor. - hsml = f[ptype]["Masses"][si:ei, ...] / f[ptype]["Density"][si:ei, ...] - hsml *= 3.0 / (4.0 * np.pi) - hsml **= 1.0 / 3.0 + hsml = f[ptype]["Masses"][si:ei,...]/f[ptype]["Density"][si:ei,...] + hsml *= 3.0/(4.0*np.pi) + hsml **= (1./3.) 
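+            # i.e. hsml = (3 * m / (4 * pi * rho)) ** (1/3): the radius of a
+            # sphere with the same volume as the Voronoi cell, before the
+            # user-configurable smoothing_factor is applied below.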
hsml *= self.ds.smoothing_factor - dt = hsml.dtype.newbyteorder("N") # Native + dt = hsml.dtype.newbyteorder("N") # Native if position_dtype is not None and dt < position_dtype: dt = position_dtype return hsml.astype(dt) - def _identify_fields(self, data_file): - fields, _units = super(IOHandlerArepoHDF5, self)._identify_fields(data_file) - fields.append(("PartType0", "smoothing_length")) - return fields, _units diff --git a/yt/frontends/eagle/fields.py b/yt/frontends/eagle/fields.py index 2a91c4c9abc..decf8716ec6 100644 --- a/yt/frontends/eagle/fields.py +++ b/yt/frontends/eagle/fields.py @@ -127,13 +127,12 @@ class EagleNetworkFieldInfo(OWLSFieldInfo): "Fe27", ) - def __init__(self, *args, **kwargs): - - super(EagleNetworkFieldInfo, self).__init__(*args, **kwargs) + def __init__(self, ds, field_list, slice_info=None): + super(EagleNetworkFieldInfo,self).__init__(ds, field_list, slice_info=slice_info) def _create_ion_density_func(self, ftype, ion): - """ returns a function that calculates the ion density of a particle. - """ + """ returns a function that calculates the ion density of a particle. + """ def _ion_density(field, data): diff --git a/yt/frontends/gadget/data_structures.py b/yt/frontends/gadget/data_structures.py index 119470891cf..a5888587de8 100644 --- a/yt/frontends/gadget/data_structures.py +++ b/yt/frontends/gadget/data_structures.py @@ -335,6 +335,7 @@ def __repr__(self): return os.path.basename(self.parameter_filename).split(".")[0] def _get_hvals(self): + self.gen_hsmls = False return self._header.value def _parse_parameter_file(self): @@ -578,7 +579,6 @@ def __init__( bounding_box=bounding_box, unit_system=unit_system, ) - self._check_hsml() def _get_hvals(self): handle = h5py.File(self.parameter_filename, mode="r") @@ -587,6 +587,7 @@ def _get_hvals(self): # Compat reasons. 
hvals["NumFiles"] = hvals["NumFilesPerSnapshot"] hvals["Massarr"] = hvals["MassTable"] + self.gen_hsmls = "SmoothingLength" not in handle[self._sph_ptypes[0]] handle.close() return hvals @@ -597,11 +598,6 @@ def _get_uvals(self): handle.close() return uvals - def _check_hsml(self): - handle = h5py.File(self.parameter_filename, mode="r") - self.gen_hsmls = "SmoothingLength" not in handle[self._sph_ptypes[0]] - handle.close() - def _set_owls_eagle(self): self.dimensionality = 3 diff --git a/yt/frontends/gadget/fields.py b/yt/frontends/gadget/fields.py index 50e7a8ee512..e934e0aa4c4 100644 --- a/yt/frontends/gadget/fields.py +++ b/yt/frontends/gadget/fields.py @@ -4,6 +4,13 @@ class GadgetFieldInfo(SPHFieldInfo): + def __init__(self, ds, field_list, slice_info=None): + if ds.gen_hsmls: + hsml = (("smoothing_length", ("code_length", [], None)),) + self.known_particle_fields += hsml + super(GadgetFieldInfo, self).__init__(ds, field_list, + slice_info=slice_info) + def setup_particle_fields(self, ptype, *args, **kwargs): # setup some special fields that only make sense for SPH particles diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 12eac998833..76bd990a0c5 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -119,7 +119,7 @@ def _generate_smoothing_length(self, data_files, kdtree): positions, kdtree, self.ds._num_neighbors) dtype = positions.dtype hsml = hsml[np.argsort(kdtree.idx)].astype(dtype) - for data_file in data_files: + for data_file in data_files: si, ei = data_file.start, data_file.end fn = data_file.filename hsml_fn = data_file.filename+".hsml" @@ -127,19 +127,18 @@ def _generate_smoothing_length(self, data_files, kdtree): g = f.require_group(self.ds._sph_ptypes[0]) d = g.require_dataset("SmoothingLength", dtype=dtype, shape=(counts[fn],)) - d[si:ei] = hsml[si+offsets[fn]:ei+offsets[fn]] + begin = si+offsets[fn] + end = min(ei, d.size)+offsets[fn] + d[si:ei] = hsml[begin:end] def _get_smoothing_length(self, data_file, position_dtype, position_shape): ptype = self.ds._sph_ptypes[0] - ind = int(ptype[-1]) si, ei = data_file.start, data_file.end if self.ds.gen_hsmls: fn = data_file.filename+".hsml" else: fn = data_file.filename with h5py.File(fn, mode="r") as f: - pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") - pcount = np.clip(pcount - si, 0, ei - si) ds = f[ptype]["SmoothingLength"][si:ei, ...] 
dt = ds.dtype.newbyteorder("N") # Native if position_dtype is not None and dt < position_dtype: @@ -281,6 +280,9 @@ def _identify_fields(self, data_file): self._vector_fields[kk] = g[kk].shape[1] fields.append((ptype, str(kk))) + if self.ds.gen_hsmls: + fields.append(("PartType0", "smoothing_length")) + f.close() return fields, {} diff --git a/yt/frontends/owls/fields.py b/yt/frontends/owls/fields.py index 6d2698805fc..06533bccd18 100644 --- a/yt/frontends/owls/fields.py +++ b/yt/frontends/owls/fields.py @@ -85,7 +85,7 @@ class OWLSFieldInfo(SPHFieldInfo): _add_ions = "PartType0" - def __init__(self, *args, **kwargs): + def __init__(self, ds, field_list, slice_info=None): new_particle_fields = ( ("Hydrogen", ("", ["H_fraction"], None)), @@ -99,9 +99,12 @@ def __init__(self, *args, **kwargs): ("Iron", ("", ["Fe_fraction"], None)), ) + if ds.gen_hsmls: + new_particle_fields += (("smoothing_length", ("code_length", [], None)),) + self.known_particle_fields += new_particle_fields - super(OWLSFieldInfo, self).__init__(*args, **kwargs) + super(OWLSFieldInfo,self).__init__(ds, field_list, slice_info=slice_info) # This enables the machinery in yt.fields.species_fields self.species_names += list(self._elements) From a572fb050d6fd69f029669c75ea8d714bfb7a684 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Wed, 17 Jun 2020 08:33:08 -0400 Subject: [PATCH 134/653] Whitespace fix --- yt/frontends/eagle/fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/eagle/fields.py b/yt/frontends/eagle/fields.py index decf8716ec6..282eab05378 100644 --- a/yt/frontends/eagle/fields.py +++ b/yt/frontends/eagle/fields.py @@ -128,7 +128,7 @@ class EagleNetworkFieldInfo(OWLSFieldInfo): ) def __init__(self, ds, field_list, slice_info=None): - super(EagleNetworkFieldInfo,self).__init__(ds, field_list, slice_info=slice_info) + super(EagleNetworkFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) def _create_ion_density_func(self, ftype, ion): """ returns a function that calculates the ion density of a particle. 
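Stepping back, the smoothing-length changes above (``gen_hsmls`` in the dataset
classes, ``_generate_smoothing_length`` in the IO handler, and the new
``smoothing_length`` particle field) are aimed at the following user-facing
behaviour. A minimal sketch, assuming a hypothetical Gadget HDF5 snapshot
``snapshot_000.hdf5`` whose gas particles carry no ``SmoothingLength`` dataset:

    import yt

    # hypothetical filename: any Gadget HDF5 snapshot lacking PartType0/SmoothingLength
    ds = yt.load("snapshot_000.hdf5")
    ad = ds.all_data()

    # The frontend sets ds.gen_hsmls when SmoothingLength is absent; smoothing
    # lengths are then computed from the particle kd-tree and cached in a
    # sidecar .hsml.hdf5 file next to the snapshot, so this field just works:
    print(ad["PartType0", "smoothing_length"])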
From 70f77078d8bf7afff5a6ad1d91619ef26d48d1a2 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Mon, 22 Jun 2020 12:44:01 -0400 Subject: [PATCH 135/653] Some fixes in response to comments --- yt/frontends/arepo/fields.py | 16 ---------------- yt/frontends/gadget/io.py | 9 +++++---- 2 files changed, 5 insertions(+), 20 deletions(-) diff --git a/yt/frontends/arepo/fields.py b/yt/frontends/arepo/fields.py index 6ce0426ba5a..94bdf40d0bf 100644 --- a/yt/frontends/arepo/fields.py +++ b/yt/frontends/arepo/fields.py @@ -7,22 +7,6 @@ class ArepoFieldInfo(GadgetFieldInfo): - known_particle_fields = GadgetFieldInfo.known_particle_fields + \ - (("MagneticField", - ("code_magnetic", ["particle_magnetic_field"], None)), - ("MagneticFieldDivergence", - ("code_magnetic/code_length", ["magnetic_field_divergence"], None)), - ("GFM_Metallicity", ("", ["metallicity"], None)), - ("GFM_Metals_00", ("", ["H_fraction"], None)), - ("GFM_Metals_01", ("", ["He_fraction"], None)), - ("GFM_Metals_02", ("", ["C_fraction"], None)), - ("GFM_Metals_03", ("", ["N_fraction"], None)), - ("GFM_Metals_04", ("", ["O_fraction"], None)), - ("GFM_Metals_05", ("", ["Ne_fraction"], None)), - ("GFM_Metals_06", ("", ["Mg_fraction"], None)), - ("GFM_Metals_07", ("", ["Si_fraction"], None)), - ("GFM_Metals_08", ("", ["Fe_fraction"], None)), - ) def __init__(self, ds, field_list, slice_info=None): if ds.cosmological_simulation: diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 76bd990a0c5..44945854432 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -107,13 +107,13 @@ def _generate_smoothing_length(self, data_files, kdtree): data_file, needed_ptype=self.ds._sph_ptypes[0]): counts[data_file.filename] += ppos.shape[0] positions.append(ppos) - if len(positions) == 0: + if not positions: return offsets = {} offset = 0 - for fn in counts: + for fn, count in counts.items(): offsets[fn] = offset - offset += counts[fn] + offset += count positions = np.concatenate(positions)[kdtree.idx] hsml = generate_smoothing_length( positions, kdtree, self.ds._num_neighbors) @@ -280,10 +280,11 @@ def _identify_fields(self, data_file): self._vector_fields[kk] = g[kk].shape[1] fields.append((ptype, str(kk))) + f.close() + if self.ds.gen_hsmls: fields.append(("PartType0", "smoothing_length")) - f.close() return fields, {} From ff166f3f61ccbc72f9e743e807f8ae98dee3e689 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Wed, 22 Jul 2020 12:09:49 -0400 Subject: [PATCH 136/653] This needs to be uconcatenate in case we have units --- yt/frontends/gadget/io.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 44945854432..aef739befbd 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -4,6 +4,7 @@ import numpy as np from yt.frontends.sph.io import IOHandlerSPH +from yt.units.yt_array import uconcatenate from yt.utilities.lib.particle_kdtree_tools import \ generate_smoothing_length from yt.utilities.logger import ytLogger as mylog @@ -114,7 +115,7 @@ def _generate_smoothing_length(self, data_files, kdtree): for fn, count in counts.items(): offsets[fn] = offset offset += count - positions = np.concatenate(positions)[kdtree.idx] + positions = uconcatenate(positions)[kdtree.idx] hsml = generate_smoothing_length( positions, kdtree, self.ds._num_neighbors) dtype = positions.dtype From 2579161f95565a562b47feb2ecf77d7bab2b28e0 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Wed, 22 Jul 2020 12:10:37 -0400 Subject: [PATCH 137/653] A 
way to not regenerate the smoothing lengths but use them again if the file hash is the same --- yt/frontends/gadget/io.py | 21 ++++++++++++--------- 1 file changed, 12 insertions(+), 9 deletions(-) diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index aef739befbd..d0ebcabd8df 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -91,16 +91,17 @@ def _yield_coordinates(self, data_file, needed_ptype=None): def _generate_smoothing_length(self, data_files, kdtree): if not self.ds.gen_hsmls: return - # TODO: We need to do something like this here but I'm not sure how - """ - if os.path.exists(self.hsml_filename): - with open(self.hsml_filename, 'rb') as f: - file_hash = struct.unpack('q', f.read(struct.calcsize('q')))[0] + hsml_fn = data_files[0].filename.replace(".hdf5", ".hsml.hdf5") + if os.path.exists(hsml_fn): + with h5py.File(hsml_fn, "r") as f: + file_hash = f.attrs["q"] if file_hash != self.ds._file_hash: - os.remove(self.hsml_filename) + mylog.warning("Replacing hsml files.") + for data_file in data_files: + hfn = data_file.filename.replace(".hdf5", ".hsml.hdf5") + os.remove(hfn) else: return - """ positions = [] counts = defaultdict(int) for data_file in data_files: @@ -120,11 +121,13 @@ def _generate_smoothing_length(self, data_files, kdtree): positions, kdtree, self.ds._num_neighbors) dtype = positions.dtype hsml = hsml[np.argsort(kdtree.idx)].astype(dtype) - for data_file in data_files: + mylog.warning("Writing smoothing lengths to hsml files.") + for i, data_file in enumerate(data_files): si, ei = data_file.start, data_file.end fn = data_file.filename - hsml_fn = data_file.filename+".hsml" + hsml_fn = data_file.filename.replace(".hdf5", ".hsml.hdf5") with h5py.File(hsml_fn, mode='a') as f: + if i == 0: f.attrs['q'] = self.ds._file_hash g = f.require_group(self.ds._sph_ptypes[0]) d = g.require_dataset("SmoothingLength", dtype=dtype, shape=(counts[fn],)) From 0c3f7c484b31c7c2d6f9518533bf69d78dfc71db Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Wed, 22 Jul 2020 12:50:04 -0400 Subject: [PATCH 138/653] Bug fixes from a bad merge --- yt/frontends/arepo/fields.py | 15 ++++++++++++++- yt/frontends/gadget/io.py | 2 +- 2 files changed, 15 insertions(+), 2 deletions(-) diff --git a/yt/frontends/arepo/fields.py b/yt/frontends/arepo/fields.py index 94bdf40d0bf..3c0ffb876e9 100644 --- a/yt/frontends/arepo/fields.py +++ b/yt/frontends/arepo/fields.py @@ -15,10 +15,23 @@ def __init__(self, ds, field_list, slice_info=None): GFM_SFT_units = "code_length/code_velocity" self.known_particle_fields += ( ("GFM_StellarFormationTime", (GFM_SFT_units, ["stellar_age"], None)), + ("MagneticField", ("code_magnetic", ["particle_magnetic_field"], None)), + ("MagneticFieldDivergence", + ("code_magnetic/code_length", ["magnetic_field_divergence"], None)), + ("GFM_Metallicity", ("", ["metallicity"], None)), + ("GFM_Metals_00", ("", ["H_fraction"], None)), + ("GFM_Metals_01", ("", ["He_fraction"], None)), + ("GFM_Metals_02", ("", ["C_fraction"], None)), + ("GFM_Metals_03", ("", ["N_fraction"], None)), + ("GFM_Metals_04", ("", ["O_fraction"], None)), + ("GFM_Metals_05", ("", ["Ne_fraction"], None)), + ("GFM_Metals_06", ("", ["Mg_fraction"], None)), + ("GFM_Metals_07", ("", ["Si_fraction"], None)), + ("GFM_Metals_08", ("", ["Fe_fraction"], None)), ) super(ArepoFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) - def setup_particle_fields(self, ptype): + def setup_particle_fields(self, ptype, *args, **kwargs): FieldInfoContainer.setup_particle_fields(self, 
ptype) if ptype == "PartType0": self.setup_gas_particle_fields(ptype) diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index d0ebcabd8df..4c00d575f92 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -139,7 +139,7 @@ def _get_smoothing_length(self, data_file, position_dtype, position_shape): ptype = self.ds._sph_ptypes[0] si, ei = data_file.start, data_file.end if self.ds.gen_hsmls: - fn = data_file.filename+".hsml" + fn = data_file.filename.replace(".hdf5", ".hsml.hdf5") else: fn = data_file.filename with h5py.File(fn, mode="r") as f: From 0a9c102c44e75dbd3231e7415c2372f3252977a8 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Thu, 23 Jul 2020 09:13:44 -0400 Subject: [PATCH 139/653] Blackening and fix flake8 --- yt/frontends/arepo/fields.py | 7 ++++--- yt/frontends/arepo/io.py | 13 +++++++------ yt/frontends/eagle/fields.py | 8 +++++--- yt/frontends/gadget/fields.py | 3 +-- yt/frontends/gadget/io.py | 23 ++++++++++++----------- yt/frontends/owls/fields.py | 2 +- 6 files changed, 30 insertions(+), 26 deletions(-) diff --git a/yt/frontends/arepo/fields.py b/yt/frontends/arepo/fields.py index 3c0ffb876e9..98cdd5a4b86 100644 --- a/yt/frontends/arepo/fields.py +++ b/yt/frontends/arepo/fields.py @@ -7,7 +7,6 @@ class ArepoFieldInfo(GadgetFieldInfo): - def __init__(self, ds, field_list, slice_info=None): if ds.cosmological_simulation: GFM_SFT_units = "dimensionless" @@ -16,8 +15,10 @@ def __init__(self, ds, field_list, slice_info=None): self.known_particle_fields += ( ("GFM_StellarFormationTime", (GFM_SFT_units, ["stellar_age"], None)), ("MagneticField", ("code_magnetic", ["particle_magnetic_field"], None)), - ("MagneticFieldDivergence", - ("code_magnetic/code_length", ["magnetic_field_divergence"], None)), + ( + "MagneticFieldDivergence", + ("code_magnetic/code_length", ["magnetic_field_divergence"], None), + ), ("GFM_Metallicity", ("", ["metallicity"], None)), ("GFM_Metals_00", ("", ["H_fraction"], None)), ("GFM_Metals_01", ("", ["He_fraction"], None)), diff --git a/yt/frontends/arepo/io.py b/yt/frontends/arepo/io.py index dfcd8e170a8..1801024a080 100644 --- a/yt/frontends/arepo/io.py +++ b/yt/frontends/arepo/io.py @@ -1,7 +1,9 @@ -from yt.frontends.gadget.api import IOHandlerGadgetHDF5 import numpy as np + +from yt.frontends.gadget.api import IOHandlerGadgetHDF5 from yt.utilities.on_demand_imports import _h5py as h5py + class IOHandlerArepoHDF5(IOHandlerGadgetHDF5): _dataset_type = "arepo_hdf5" @@ -20,12 +22,11 @@ def _get_smoothing_length(self, data_file, position_dtype, position_shape): # we compute one here by finding the radius of the sphere # corresponding to the volume of the Voroni cell and multiplying # by a user-configurable smoothing factor. - hsml = f[ptype]["Masses"][si:ei,...]/f[ptype]["Density"][si:ei,...] - hsml *= 3.0/(4.0*np.pi) - hsml **= (1./3.) + hsml = f[ptype]["Masses"][si:ei, ...] / f[ptype]["Density"][si:ei, ...] 
+ hsml *= 3.0 / (4.0 * np.pi) + hsml **= 1.0 / 3.0 hsml *= self.ds.smoothing_factor - dt = hsml.dtype.newbyteorder("N") # Native + dt = hsml.dtype.newbyteorder("N") # Native if position_dtype is not None and dt < position_dtype: dt = position_dtype return hsml.astype(dt) - diff --git a/yt/frontends/eagle/fields.py b/yt/frontends/eagle/fields.py index 282eab05378..5ae448d0e38 100644 --- a/yt/frontends/eagle/fields.py +++ b/yt/frontends/eagle/fields.py @@ -128,11 +128,13 @@ class EagleNetworkFieldInfo(OWLSFieldInfo): ) def __init__(self, ds, field_list, slice_info=None): - super(EagleNetworkFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) + super(EagleNetworkFieldInfo, self).__init__( + ds, field_list, slice_info=slice_info + ) def _create_ion_density_func(self, ftype, ion): - """ returns a function that calculates the ion density of a particle. - """ + """ returns a function that calculates the ion density of a particle. + """ def _ion_density(field, data): diff --git a/yt/frontends/gadget/fields.py b/yt/frontends/gadget/fields.py index e934e0aa4c4..c929fcbff6e 100644 --- a/yt/frontends/gadget/fields.py +++ b/yt/frontends/gadget/fields.py @@ -8,8 +8,7 @@ def __init__(self, ds, field_list, slice_info=None): if ds.gen_hsmls: hsml = (("smoothing_length", ("code_length", [], None)),) self.known_particle_fields += hsml - super(GadgetFieldInfo, self).__init__(ds, field_list, - slice_info=slice_info) + super(GadgetFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) def setup_particle_fields(self, ptype, *args, **kwargs): diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 4c00d575f92..40c4f7c41d1 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -5,8 +5,7 @@ from yt.frontends.sph.io import IOHandlerSPH from yt.units.yt_array import uconcatenate -from yt.utilities.lib.particle_kdtree_tools import \ - generate_smoothing_length +from yt.utilities.lib.particle_kdtree_tools import generate_smoothing_length from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5py @@ -106,7 +105,8 @@ def _generate_smoothing_length(self, data_files, kdtree): counts = defaultdict(int) for data_file in data_files: for _, ppos in self._yield_coordinates( - data_file, needed_ptype=self.ds._sph_ptypes[0]): + data_file, needed_ptype=self.ds._sph_ptypes[0] + ): counts[data_file.filename] += ppos.shape[0] positions.append(ppos) if not positions: @@ -117,8 +117,7 @@ def _generate_smoothing_length(self, data_files, kdtree): offsets[fn] = offset offset += count positions = uconcatenate(positions)[kdtree.idx] - hsml = generate_smoothing_length( - positions, kdtree, self.ds._num_neighbors) + hsml = generate_smoothing_length(positions, kdtree, self.ds._num_neighbors) dtype = positions.dtype hsml = hsml[np.argsort(kdtree.idx)].astype(dtype) mylog.warning("Writing smoothing lengths to hsml files.") @@ -126,13 +125,15 @@ def _generate_smoothing_length(self, data_files, kdtree): si, ei = data_file.start, data_file.end fn = data_file.filename hsml_fn = data_file.filename.replace(".hdf5", ".hsml.hdf5") - with h5py.File(hsml_fn, mode='a') as f: - if i == 0: f.attrs['q'] = self.ds._file_hash + with h5py.File(hsml_fn, mode="a") as f: + if i == 0: + f.attrs["q"] = self.ds._file_hash g = f.require_group(self.ds._sph_ptypes[0]) - d = g.require_dataset("SmoothingLength", dtype=dtype, - shape=(counts[fn],)) - begin = si+offsets[fn] - end = min(ei, d.size)+offsets[fn] + d = g.require_dataset( + "SmoothingLength", dtype=dtype, 
shape=(counts[fn],) + ) + begin = si + offsets[fn] + end = min(ei, d.size) + offsets[fn] d[si:ei] = hsml[begin:end] def _get_smoothing_length(self, data_file, position_dtype, position_shape): diff --git a/yt/frontends/owls/fields.py b/yt/frontends/owls/fields.py index 06533bccd18..eda0034fddc 100644 --- a/yt/frontends/owls/fields.py +++ b/yt/frontends/owls/fields.py @@ -104,7 +104,7 @@ def __init__(self, ds, field_list, slice_info=None): self.known_particle_fields += new_particle_fields - super(OWLSFieldInfo,self).__init__(ds, field_list, slice_info=slice_info) + super(OWLSFieldInfo, self).__init__(ds, field_list, slice_info=slice_info) # This enables the machinery in yt.fields.species_fields self.species_names += list(self._elements) From eda4d99e0ba83ca0eb6f144860a6956a5a13fae1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 14 Jul 2020 11:35:11 +0200 Subject: [PATCH 140/653] fix: improve read_amrvac_namelist api (automatically expand '~' tokens and natively support pathlike objects) --- yt/frontends/amrvac/__init__.py | 15 +++------------ .../amrvac/tests/test_read_amrvac_namelist.py | 6 ++++++ 2 files changed, 9 insertions(+), 12 deletions(-) diff --git a/yt/frontends/amrvac/__init__.py b/yt/frontends/amrvac/__init__.py index 6d05b21321d..927b5ddc9bb 100644 --- a/yt/frontends/amrvac/__init__.py +++ b/yt/frontends/amrvac/__init__.py @@ -1,12 +1,6 @@ -""" -API for yt.frontends.amrvac - - - -""" - - +import os from yt.utilities.on_demand_imports import _f90nml as f90nml +from yt.funcs import ensure_list def read_amrvac_namelist(parfiles): @@ -27,10 +21,7 @@ def read_amrvac_namelist(parfiles): A single namelist object. The class inherits from ordereddict. """ - # typechecking - if isinstance(parfiles, str): - parfiles = [parfiles] - assert all([isinstance(pf, str) for pf in parfiles]) + parfiles = [os.path.expanduser(pf) for pf in ensure_list(parfiles)] # first merge the namelists namelists = [f90nml.read(parfile) for parfile in parfiles] diff --git a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py index 13d0e0bb07f..09ac90389ac 100644 --- a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py +++ b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py @@ -1,3 +1,5 @@ +from os import path +from pathlib import Path from copy import deepcopy from os import path @@ -10,6 +12,10 @@ modifier_parfile = path.join(test_dir, "sample_parfiles/tvdlf_scheme.par") +@requires_module("f90nml") +def test_read_pathlike(): + read_amrvac_namelist(Path(blast_wave_parfile)) + @requires_module("f90nml") def test_read_one_file(): """when provided a single file, the function should merely act as a wrapper for f90nml.read()""" From 1ec614b6e6702b06e57cb291dde171eaddfb59fa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 14 Jul 2020 11:40:06 +0200 Subject: [PATCH 141/653] tests: fix windows incompatibility in existing tests for amrvac namelist reader --- yt/frontends/amrvac/__init__.py | 3 ++- yt/frontends/amrvac/tests/test_read_amrvac_namelist.py | 8 ++++---- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/yt/frontends/amrvac/__init__.py b/yt/frontends/amrvac/__init__.py index 927b5ddc9bb..194a0e40289 100644 --- a/yt/frontends/amrvac/__init__.py +++ b/yt/frontends/amrvac/__init__.py @@ -1,6 +1,7 @@ import os -from yt.utilities.on_demand_imports import _f90nml as f90nml + from yt.funcs import ensure_list +from yt.utilities.on_demand_imports import _f90nml as f90nml def 
read_amrvac_namelist(parfiles): diff --git a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py index 09ac90389ac..a719e08bb6d 100644 --- a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py +++ b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py @@ -1,21 +1,21 @@ -from os import path -from pathlib import Path from copy import deepcopy from os import path +from pathlib import Path from yt.frontends.amrvac import read_amrvac_namelist from yt.testing import requires_module from yt.utilities.on_demand_imports import _f90nml as f90nml test_dir = path.dirname(path.abspath(__file__)) -blast_wave_parfile = path.join(test_dir, "sample_parfiles/bw_3d.par") -modifier_parfile = path.join(test_dir, "sample_parfiles/tvdlf_scheme.par") +blast_wave_parfile = path.join(test_dir, "sample_parfiles", "bw_3d.par") +modifier_parfile = path.join(test_dir, "sample_parfiles", "tvdlf_scheme.par") @requires_module("f90nml") def test_read_pathlike(): read_amrvac_namelist(Path(blast_wave_parfile)) + @requires_module("f90nml") def test_read_one_file(): """when provided a single file, the function should merely act as a wrapper for f90nml.read()""" From 7decd992e6d57394e63efd12bd0cc1b9d4502676 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 23 Jul 2020 18:11:00 +0200 Subject: [PATCH 142/653] fme --- yt/frontends/amrvac/tests/test_read_amrvac_namelist.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py index a719e08bb6d..1ba93ba4c4b 100644 --- a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py +++ b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py @@ -1,14 +1,14 @@ +import os from copy import deepcopy -from os import path from pathlib import Path from yt.frontends.amrvac import read_amrvac_namelist from yt.testing import requires_module from yt.utilities.on_demand_imports import _f90nml as f90nml -test_dir = path.dirname(path.abspath(__file__)) -blast_wave_parfile = path.join(test_dir, "sample_parfiles", "bw_3d.par") -modifier_parfile = path.join(test_dir, "sample_parfiles", "tvdlf_scheme.par") +test_dir = os.path.dirname(os.path.abspath(__file__)) +blast_wave_parfile = os.path.join(test_dir, "sample_parfiles", "bw_3d.par") +modifier_parfile = os.path.join(test_dir, "sample_parfiles", "tvdlf_scheme.par") @requires_module("f90nml") From 09febf59a4315fd9643f152d2ebe673a048a78e4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 22 Jul 2020 16:47:32 +0200 Subject: [PATCH 143/653] refactor: move reader function to io module --- doc/source/reference/api/api.rst | 2 +- yt/frontends/amrvac/__init__.py | 36 +----------------------------- yt/frontends/amrvac/api.py | 8 ++----- yt/frontends/amrvac/io.py | 38 ++++++++++++++++++++++++++++++++ 4 files changed, 42 insertions(+), 42 deletions(-) diff --git a/doc/source/reference/api/api.rst b/doc/source/reference/api/api.rst index 8e5068ae85d..43d17694e8c 100644 --- a/doc/source/reference/api/api.rst +++ b/doc/source/reference/api/api.rst @@ -189,7 +189,7 @@ AMRVAC ~yt.frontends.amrvac.data_structures.AMRVACDataset ~yt.frontends.amrvac.fields.AMRVACFieldInfo ~yt.frontends.amrvac.io.AMRVACIOHandler - ~yt.frontends.amrvac.read_amrvac_namelist + ~yt.frontends.amrvac.io.read_amrvac_namelist ARTIO ^^^^^ diff --git a/yt/frontends/amrvac/__init__.py b/yt/frontends/amrvac/__init__.py index 194a0e40289..889113677bf 100644 --- 
a/yt/frontends/amrvac/__init__.py +++ b/yt/frontends/amrvac/__init__.py @@ -1,39 +1,5 @@ import os from yt.funcs import ensure_list -from yt.utilities.on_demand_imports import _f90nml as f90nml - -def read_amrvac_namelist(parfiles): - """Read one or more parfiles, and return a unified f90nml.Namelist object. - - This function replicates the patching logic of MPI-AMRVAC where redundant parameters - only retain last-in-line values EXCEPT `&filelist:base_filename`, which is accumulated. - When passed a single file, this function acts as a mere wrapper of f90nml.read(). - - Parameters - ---------- - parfiles : str or list - A file path, or a list of file paths to MPI-AMRVAC configuration parfiles. - - Returns - ------- - unified_namelist : f90nml.Namelist - A single namelist object. The class inherits from ordereddict. - - """ - parfiles = [os.path.expanduser(pf) for pf in ensure_list(parfiles)] - - # first merge the namelists - namelists = [f90nml.read(parfile) for parfile in parfiles] - unified_namelist = f90nml.Namelist() - for nml in namelists: - unified_namelist.patch(nml) - - # accumulate `&filelist:base_filename` - base_filename = "".join( - [nml.get("filelist", {}).get("base_filename", "") for nml in namelists] - ) - unified_namelist["filelist"]["base_filename"] = base_filename - - return unified_namelist +from .io import read_amrvac_namelist diff --git a/yt/frontends/amrvac/api.py b/yt/frontends/amrvac/api.py index 4837ef7e4a8..d38d5395e00 100644 --- a/yt/frontends/amrvac/api.py +++ b/yt/frontends/amrvac/api.py @@ -1,11 +1,7 @@ """ -API for yt.frontends.amrvac - - - +frontend API: a submodule that exposes user-facing defs and classes """ - from .data_structures import AMRVACDataset, AMRVACGrid, AMRVACHierarchy from .fields import AMRVACFieldInfo -from .io import AMRVACIOHandler +from .io import AMRVACIOHandler, read_amrvac_namelist diff --git a/yt/frontends/amrvac/io.py b/yt/frontends/amrvac/io.py index 39ed1d50e62..612dce24c34 100644 --- a/yt/frontends/amrvac/io.py +++ b/yt/frontends/amrvac/io.py @@ -4,15 +4,53 @@ """ +import os import numpy as np +from yt.funcs import ensure_list from yt.geometry.selection_routines import GridSelector from yt.utilities.io_handler import BaseIOHandler +from yt.utilities.on_demand_imports import _f90nml as f90nml from .datfile_utils import get_single_block_field_data +def read_amrvac_namelist(parfiles): + """Read one or more parfiles, and return a unified f90nml.Namelist object. + + This function replicates the patching logic of MPI-AMRVAC where redundant parameters + only retain last-in-line values EXCEPT `&filelist:base_filename`, which is accumulated. + When passed a single file, this function acts as a mere wrapper of f90nml.read(). + + Parameters + ---------- + parfiles : str or list + A file path, or a list of file paths to MPI-AMRVAC configuration parfiles. + + Returns + ------- + unified_namelist : f90nml.Namelist + A single namelist object. The class inherits from ordereddict. 
+ + """ + parfiles = [os.path.expanduser(pf) for pf in ensure_list(parfiles)] + + # first merge the namelists + namelists = [f90nml.read(parfile) for parfile in parfiles] + unified_namelist = f90nml.Namelist() + for nml in namelists: + unified_namelist.patch(nml) + + # accumulate `&filelist:base_filename` + base_filename = "".join( + [nml.get("filelist", {}).get("base_filename", "") for nml in namelists] + ) + unified_namelist["filelist"]["base_filename"] = base_filename + + return unified_namelist + + class AMRVACIOHandler(BaseIOHandler): _particle_reader = False _dataset_type = "amrvac" From 068d6bdfe44e74d2ed992e2f5575967f7126d29b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 22 Jul 2020 16:56:48 +0200 Subject: [PATCH 144/653] doc: update docstring --- yt/frontends/amrvac/io.py | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/yt/frontends/amrvac/io.py b/yt/frontends/amrvac/io.py index 612dce24c34..da29e3abe38 100644 --- a/yt/frontends/amrvac/io.py +++ b/yt/frontends/amrvac/io.py @@ -20,12 +20,13 @@ def read_amrvac_namelist(parfiles): """Read one or more parfiles, and return a unified f90nml.Namelist object. This function replicates the patching logic of MPI-AMRVAC where redundant parameters - only retain last-in-line values EXCEPT `&filelist:base_filename`, which is accumulated. - When passed a single file, this function acts as a mere wrapper of f90nml.read(). + only retain last-in-line values, with the exception of `&filelist:base_filename`, + which is accumulated. When passed a single file, this function acts as a mere + wrapper of f90nml.read(). Parameters ---------- - parfiles : str or list + parfiles : str, os.Pathlike, byte, or an iterable returning those types A file path, or a list of file paths to MPI-AMRVAC configuration parfiles. Returns From a3586d426a2503ae132e241a6bd91f2ffe4bb683 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 14 Jul 2020 20:34:15 +0200 Subject: [PATCH 145/653] hotfix: disable ghost-zone smoothing for AMRVAC grids. This allows computation of vorticity to complete even with improper ghost-zones reconstruction. Results are visually impaired but it's a lot easier than actually implementing the missing ghost-zones support. 
--- yt/frontends/amrvac/data_structures.py | 13 +++++++++++++ 1 file changed, 13 insertions(+) diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index a390b5aabfd..26df66fac82 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -9,6 +9,7 @@ import os import stat import struct +import warnings import weakref import numpy as np @@ -58,6 +59,18 @@ def get_global_startindex(self): self.start_index = np.rint(start_index).astype("int64").ravel() return self.start_index + def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, smoothed=False): + if smoothed: + warnings.warn( + "ghost-zones interpolation/smoothing is not " + "currently supported for AMRVAC data.", + category=RuntimeWarning, + ) + smoothed = False + return super(AMRVACGrid, self).retrieve_ghost_zones( + n_zones, fields, all_levels=all_levels, smoothed=smoothed + ) + class AMRVACHierarchy(GridIndex): grid = AMRVACGrid From 15e494f438bca87d3299ae76f431185185c7c2ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 4 Jul 2020 21:07:09 +0200 Subject: [PATCH 146/653] feature: add support for numpy-like ellipsis selection to RegionExpression --- yt/data_objects/region_expression.py | 24 ++++++++++++++++++++++-- yt/data_objects/tests/test_regions.py | 23 ++++++++++++++++++++++- 2 files changed, 44 insertions(+), 3 deletions(-) diff --git a/yt/data_objects/region_expression.py b/yt/data_objects/region_expression.py index 40f8548d6d8..bf3ca2c21a3 100644 --- a/yt/data_objects/region_expression.py +++ b/yt/data_objects/region_expression.py @@ -2,7 +2,11 @@ from yt.funcs import obj_length from yt.units.yt_array import YTQuantity -from yt.utilities.exceptions import YTDimensionalityError, YTFieldNotParseable +from yt.utilities.exceptions import ( + YTDimensionalityError, + YTFieldNotFound, + YTFieldNotParseable, +) from yt.visualization.line_plot import LineBuffer from .data_containers import _get_ipython_key_completion @@ -26,7 +30,7 @@ def __getitem__(self, item): # that result in a rectangular prism or a slice. try: return self.all_data[item] - except (TypeError, YTFieldNotParseable): + except (TypeError, YTFieldNotFound, YTFieldNotParseable): pass if isinstance(item, slice): @@ -42,6 +46,22 @@ def __getitem__(self, item): # ds.r[::256j, ::256j, ::256j]. Other cases would be if we do # ds.r[0.1:0.9] where it will be expanded along all dimensions. item = tuple(item for _ in range(self.ds.dimensionality)) + + if item is Ellipsis: + item = (Ellipsis,) + + # from this point, item is implicitly assumed to be iterable + if Ellipsis in item: + # expand "..." into the appropriate number of ":" + item = list(item) + idx = item.index(Ellipsis) + item.pop(idx) + if Ellipsis in item: + # this error mimics numpy's + raise IndexError("an index can only have a single ellipsis ('...')") + while len(item) < self.ds.dimensionality: + item.insert(idx, slice(None)) + if len(item) != self.ds.dimensionality: # Not the right specification, and we don't want to do anything # implicitly. 
Note that this happens *after* the implicit expansion diff --git a/yt/data_objects/tests/test_regions.py b/yt/data_objects/tests/test_regions.py index 4531191d88c..7df06a144dd 100644 --- a/yt/data_objects/tests/test_regions.py +++ b/yt/data_objects/tests/test_regions.py @@ -1,4 +1,4 @@ -from yt.testing import assert_array_equal, fake_amr_ds, fake_random_ds +from yt.testing import assert_array_equal, assert_raises, fake_amr_ds, fake_random_ds from yt.units import cm @@ -28,3 +28,24 @@ def test_max_level_min_level_semantics(): assert ad["grid_level"].min() == 2 ad.min_level = 0 assert ad["grid_level"].min() == 0 + + +def test_ellipsis_selection(): + ds = fake_amr_ds() + reg = ds.r[:, :, :] + ereg = ds.r[...] + assert_array_equal(reg.fwidth, ereg.fwidth) + + reg = ds.r[(0.5, "cm"), :, :] + ereg = ds.r[(0.5, "cm"), ...] + assert_array_equal(reg.fwidth, ereg.fwidth) + + reg = ds.r[:, :, (0.5, "cm")] + ereg = ds.r[..., (0.5, "cm")] + assert_array_equal(reg.fwidth, ereg.fwidth) + + reg = ds.r[:, :, (0.5, "cm")] + ereg = ds.r[..., (0.5, "cm")] + assert_array_equal(reg.fwidth, ereg.fwidth) + + assert_raises(IndexError, ds.r.__getitem__, (..., (0.5, "cm"), ...)) From de1e3d0975cbce0f05cd89fe2deade4c8861bfe3 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Thu, 23 Jul 2020 17:48:04 -0500 Subject: [PATCH 147/653] Ignore .kdtree file while detecting format. Fixes #2778 --- yt/frontends/gadget/data_structures.py | 7 ++++++- yt/frontends/gizmo/data_structures.py | 7 ++++++- yt/frontends/owls/data_structures.py | 7 ++++++- 3 files changed, 18 insertions(+), 3 deletions(-) diff --git a/yt/frontends/gadget/data_structures.py b/yt/frontends/gadget/data_structures.py index 15ce3ff6335..bedfea4b6f2 100644 --- a/yt/frontends/gadget/data_structures.py +++ b/yt/frontends/gadget/data_structures.py @@ -248,7 +248,12 @@ def __init__( # came through _is_valid in load() for f in os.listdir(filename): fname = os.path.join(filename, f) - if (".0" in f) and (".ewah" not in f) and os.path.isfile(fname): + fext = os.path.splitext(fname)[-1] + if ( + (".0" in f) + and (fext not in {".ewah", ".kdtree"}) + and os.path.isfile(fname) + ): filename = os.path.join(filename, f) break self._header = GadgetBinaryHeader(filename, header_spec) diff --git a/yt/frontends/gizmo/data_structures.py b/yt/frontends/gizmo/data_structures.py index b2777fc80f8..df3163e9bb7 100644 --- a/yt/frontends/gizmo/data_structures.py +++ b/yt/frontends/gizmo/data_structures.py @@ -20,7 +20,12 @@ def _is_valid(self, *args, **kwargs): valid_files = [] for f in os.listdir(args[0]): fname = os.path.join(args[0], f) - if (".0" in f) and (".ewah" not in f) and os.path.isfile(fname): + fext = os.path.splitext(fname)[-1] + if ( + (".0" in f) + and (fext not in {".ewah", ".kdtree"}) + and os.path.isfile(fname) + ): valid_files.append(fname) if len(valid_files) == 0: valid = False diff --git a/yt/frontends/owls/data_structures.py b/yt/frontends/owls/data_structures.py index 4e28099b199..a3132c1b284 100644 --- a/yt/frontends/owls/data_structures.py +++ b/yt/frontends/owls/data_structures.py @@ -48,7 +48,12 @@ def _is_valid(self, *args, **kwargs): valid_files = [] for f in os.listdir(args[0]): fname = os.path.join(args[0], f) - if (".0" in f) and (".ewah" not in f) and os.path.isfile(fname): + fext = os.path.splitext(fname)[-1] + if ( + (".0" in f) + and (fext not in {".ewah", ".kdtree"}) + and os.path.isfile(fname) + ): valid_files.append(fname) if len(valid_files) == 0: valid = False From c7db250aef3a582a560028644d45701ec25a9cc3 Mon Sep 17 
00:00:00 2001 From: John ZuHone Date: Fri, 24 Jul 2020 09:26:53 -0400 Subject: [PATCH 148/653] Remove this line to restore this test --- tests/tests.yaml | 1 - 1 file changed, 1 deletion(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index 484cad2eaf6..893ee953a73 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -162,6 +162,5 @@ other_tests: unittests: - '--exclude=test_mesh_slices' # disable randomly failing test - '--exclude=test_load_from_path' # py2 - - '--exclude=test_Snipshot' # until PR 2645 is merged cookbook: - 'doc/source/cookbook/tests/test_cookbook.py' From 22b162aec81123e35d016c7749acc607eae4c221 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 23 Jul 2020 00:59:53 +0200 Subject: [PATCH 149/653] refactor: separate tupleization from field determination --- setup.cfg | 2 +- yt/data_objects/data_containers.py | 99 +++++++++++++++++----------- yt/data_objects/region_expression.py | 8 +-- 3 files changed, 62 insertions(+), 47 deletions(-) diff --git a/setup.cfg b/setup.cfg index 2cedac43241..bd18a3561b5 100644 --- a/setup.cfg +++ b/setup.cfg @@ -52,6 +52,6 @@ combine_as_imports=True line_length=88 # isort can't be applied to yt/__init__.py because it creates circular imports skip = venv, doc, benchmarks, yt/__init__.py, yt/extern -known_third_party = IPython, nose, numpy, sympy, matplotlib, unyt, git, yaml, dateutil, requests, coverage, pytest, pyx +known_third_party = IPython, nose, numpy, sympy, matplotlib, unyt, git, yaml, dateutil, requests, coverage, pytest, pyx, glue known_first_party = yt sections = FUTURE,STDLIB,THIRDPARTY,FIRSTPARTY,LOCALFOLDER diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index d19d8b77b05..0ea775ff514 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -12,7 +12,6 @@ import yt.geometry.selection_routines from yt.data_objects.field_data import YTFieldData from yt.data_objects.profiles import create_profile -from yt.fields.derived_field import DerivedField from yt.fields.field_exceptions import NeedsGridType from yt.frontends.ytdata.utilities import save_as_dataset from yt.funcs import ( @@ -1428,6 +1427,49 @@ def _field_type_state(self, ftype, finfo, obj=None): obj._current_particle_type = old_particle_type obj._current_fluid_type = old_fluid_type + def _tupleize_field(self, field): + + try: + ftype, fname = field.name + return ftype, fname + except AttributeError: + pass + + if iterable(field) and not isinstance(field, str): + try: + ftype, fname = field + if not all(isinstance(_, str) for _ in field): + raise TypeError + return ftype, fname + except TypeError as e: + raise YTFieldNotParseable(field) from e + except ValueError: + pass + + try: + fname = field + finfo = self.ds._get_field_info(field) + if finfo.sampling_type == "particle": + ftype = self._current_particle_type + if hasattr(self.ds, "_sph_ptypes"): + ptypes = self.ds._sph_ptypes + if finfo.name[0] in ptypes: + ftype = finfo.name[0] + elif finfo.alias_field and finfo.alias_name[0] in ptypes: + ftype = self._current_fluid_type + else: + ftype = self._current_fluid_type + if (ftype, fname) not in self.ds.field_info: + ftype = self.ds._last_freq[0] + return ftype, fname + except YTFieldNotFound: + pass + + if isinstance(field, str): + return "unknown", field + + raise YTFieldNotParseable(field) + def _determine_fields(self, fields): fields = ensure_list(fields) explicit_fields = [] @@ -1435,45 +1477,22 @@ def _determine_fields(self, fields): if field in self._container_fields: 
explicit_fields.append(field) continue - if isinstance(field, tuple): - if ( - len(field) != 2 - or not isinstance(field[0], str) - or not isinstance(field[1], str) - ): - raise YTFieldNotParseable(field) - ftype, fname = field - finfo = self.ds._get_field_info(ftype, fname) - elif isinstance(field, DerivedField): - ftype, fname = field.name - finfo = field - else: - fname = field - finfo = self.ds._get_field_info("unknown", fname) - if finfo.sampling_type == "particle": - ftype = self._current_particle_type - if hasattr(self.ds, "_sph_ptypes"): - ptypes = self.ds._sph_ptypes - if finfo.name[0] in ptypes: - ftype = finfo.name[0] - elif finfo.alias_field and finfo.alias_name[0] in ptypes: - ftype = self._current_fluid_type - else: - ftype = self._current_fluid_type - if (ftype, fname) not in self.ds.field_info: - ftype = self.ds._last_freq[0] - - # really ugly check to ensure that this field really does exist somewhere, - # in some naming convention, before returning it as a possible field type - if ( - (ftype, fname) not in self.ds.field_info - and (ftype, fname) not in self.ds.field_list - and fname not in self.ds.field_list - and (ftype, fname) not in self.ds.derived_field_list - and fname not in self.ds.derived_field_list - and (ftype, fname) not in self._container_fields - ): - raise YTFieldNotFound((ftype, fname), self.ds) + + ftype, fname = self._tupleize_field(field) + # print(field, " : ",ftype, fname) + finfo = self.ds._get_field_info(ftype, fname) + + # really ugly check to ensure that this field really does exist somewhere, + # in some naming convention, before returning it as a possible field type + if ( + (ftype, fname) not in self.ds.field_info + and (ftype, fname) not in self.ds.field_list + and fname not in self.ds.field_list + and (ftype, fname) not in self.ds.derived_field_list + and fname not in self.ds.derived_field_list + and (ftype, fname) not in self._container_fields + ): + raise YTFieldNotFound((ftype, fname), self.ds) # these tests are really insufficient as a field type may be valid, and the # field name may be valid, but not the combination (field type, field name) diff --git a/yt/data_objects/region_expression.py b/yt/data_objects/region_expression.py index bf3ca2c21a3..9427872e832 100644 --- a/yt/data_objects/region_expression.py +++ b/yt/data_objects/region_expression.py @@ -2,11 +2,7 @@ from yt.funcs import obj_length from yt.units.yt_array import YTQuantity -from yt.utilities.exceptions import ( - YTDimensionalityError, - YTFieldNotFound, - YTFieldNotParseable, -) +from yt.utilities.exceptions import YTDimensionalityError, YTFieldNotParseable from yt.visualization.line_plot import LineBuffer from .data_containers import _get_ipython_key_completion @@ -30,7 +26,7 @@ def __getitem__(self, item): # that result in a rectangular prism or a slice. 
try: return self.all_data[item] - except (TypeError, YTFieldNotFound, YTFieldNotParseable): + except (TypeError, YTFieldNotParseable): pass if isinstance(item, slice): From fb666607b7d88365bd4c4727e6ebfa8553c36ed0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 23 Jul 2020 15:37:41 +0200 Subject: [PATCH 150/653] clarify error for YTFieldNotFound error raised in Dataset._get_field_info() --- yt/data_objects/static_output.py | 5 +++-- yt/utilities/exceptions.py | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index a89e4232739..dae0cb8832c 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -813,6 +813,7 @@ def _setup_particle_types(self, ptypes=None): def _get_field_info(self, ftype, fname=None): self.index + INPUT = ftype, fname if fname is None: if isinstance(ftype, DerivedField): ftype, fname = ftype.name @@ -860,7 +861,7 @@ def _get_field_info(self, ftype, fname=None): self._last_freq = (ftype, fname) self._last_finfo = self.field_info[(ftype, fname)] return self._last_finfo - raise YTFieldNotFound((ftype, fname), self) + raise YTFieldNotFound(field=INPUT, ds=self) def _setup_classes(self): # Called by subclass @@ -1081,7 +1082,7 @@ def _assign_unit_system(self, unit_system): self.unit_registry.unit_system = self.unit_system def _create_unit_registry(self, unit_system): - import yt.units.dimensions as dimensions + from yt.units import dimensions as dimensions # yt assumes a CGS unit system by default (for back compat reasons). # Since unyt is MKS by default we specify the MKS values of the base diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index cc0d68e5c9e..34ab24edc6f 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -52,12 +52,12 @@ def __str__(self): class YTFieldNotFound(YTException): - def __init__(self, fname, ds): - self.fname = fname + def __init__(self, field, ds): + self.field = field self.ds = ds def __str__(self): - return "Could not find field '%s' in %s." % (self.fname, self.ds) + return "Could not find field %s in %s." 
% (self.field, self.ds) class YTParticleTypeNotFound(YTException): From 7411feb3ead49776ad713002d3233887f6ca8c94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 23 Jul 2020 15:38:52 +0200 Subject: [PATCH 151/653] refactor: rewrite Dataset._get_field_info() so that when it fails to guess the ftype, the error message shows the user input instead of the latest internal attempt --- yt/data_objects/static_output.py | 30 ++++++++++++------- yt/data_objects/tests/test_data_containers.py | 2 +- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index dae0cb8832c..c63dc60c05e 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -16,7 +16,7 @@ from yt.data_objects.particle_filters import filter_registry from yt.data_objects.particle_unions import ParticleUnion from yt.data_objects.region_expression import RegionExpression -from yt.fields.derived_field import DerivedField, ValidateSpatial +from yt.fields.derived_field import ValidateSpatial from yt.fields.field_type_container import FieldTypeContainer from yt.fields.fluid_fields import setup_gradient_fields from yt.fields.particle_fields import DEP_MSG_SMOOTH_FIELD @@ -813,25 +813,32 @@ def _setup_particle_types(self, ptypes=None): def _get_field_info(self, ftype, fname=None): self.index + + # store the original inputs in case we need to raise an error INPUT = ftype, fname if fname is None: - if isinstance(ftype, DerivedField): + try: ftype, fname = ftype.name - else: + except AttributeError: ftype, fname = "unknown", ftype - guessing_type = False - if ftype == "unknown": - guessing_type = True + + # storing this condition before altering it + guessing_type = ftype == "unknown" + if guessing_type: ftype = self._last_freq[0] or ftype field = (ftype, fname) - if field == self._last_freq: - if field not in self.field_info.field_aliases.values(): - return self._last_finfo + + if ( + field == self._last_freq + and field not in self.field_info.field_aliases.values() + ): + return self._last_finfo if field in self.field_info: self._last_freq = field self._last_finfo = self.field_info[(ftype, fname)] return self._last_finfo - if fname in self.field_info: + + try: # Sometimes, if guessing_type == True, this will be switched for # the type of field it is. So we look at the field type and # determine if we need to change the type. @@ -848,6 +855,9 @@ def _get_field_info(self, ftype, fname=None): field = self.default_fluid_type, field[1] self._last_freq = field return self._last_finfo + except KeyError: + pass + # We also should check "all" for particles, which can show up if you're # mixing deposition/gas fields with particle fields. if guessing_type: diff --git a/yt/data_objects/tests/test_data_containers.py b/yt/data_objects/tests/test_data_containers.py index 7c1461b9ab7..e04ea00eb75 100644 --- a/yt/data_objects/tests/test_data_containers.py +++ b/yt/data_objects/tests/test_data_containers.py @@ -57,7 +57,7 @@ def test_yt_data_container(self): # Delete a non-existent field with assert_raises(YTFieldNotFound) as ex: del proj["p_mass"] - desired = "Could not find field '('stream', 'p_mass')' in UniformGridData." + desired = "Could not find field ('unknown', 'p_mass') in UniformGridData." 
assert_equal(str(ex.exception), desired) def test_write_out(self): From c59f2fae8e809d1ef01c0cded92bc31415794648 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 25 Jun 2020 11:11:53 +0200 Subject: [PATCH 152/653] tests: rewrite tests for yt.load (moved from frontends/stream/tests/), remove deprecated tests --- tests/tests.yaml | 1 - yt/frontends/stream/tests/test_outputs.py | 7 --- yt/tests/test_load_arg_parsing.py | 74 +++++++++++++++++++++++ yt/tests/test_load_errors.py | 43 +++++++++++++ yt/tests/test_load_from_path.py | 41 ------------- 5 files changed, 117 insertions(+), 49 deletions(-) create mode 100644 yt/tests/test_load_arg_parsing.py create mode 100644 yt/tests/test_load_errors.py delete mode 100644 yt/tests/test_load_from_path.py diff --git a/tests/tests.yaml b/tests/tests.yaml index 893ee953a73..f8a34668ecf 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -161,6 +161,5 @@ answer_tests: other_tests: unittests: - '--exclude=test_mesh_slices' # disable randomly failing test - - '--exclude=test_load_from_path' # py2 cookbook: - 'doc/source/cookbook/tests/test_cookbook.py' diff --git a/yt/frontends/stream/tests/test_outputs.py b/yt/frontends/stream/tests/test_outputs.py index a6e3fb562d3..e9fe9f39204 100644 --- a/yt/frontends/stream/tests/test_outputs.py +++ b/yt/frontends/stream/tests/test_outputs.py @@ -5,14 +5,12 @@ import numpy as np -from yt.convenience import load from yt.frontends.stream.data_structures import load_particles, load_uniform_grid from yt.testing import assert_equal, assert_raises from yt.utilities.exceptions import ( YTInconsistentGridFieldShape, YTInconsistentGridFieldShapeGridDims, YTInconsistentParticleFieldShape, - YTOutputNotIdentified, ) @@ -32,11 +30,6 @@ def tearDown(self): os.chdir(self.curdir) shutil.rmtree(self.tmpdir) - def test_load_empty_file(self): - assert_raises(YTOutputNotIdentified, load, "not_a_file") - assert_raises(YTOutputNotIdentified, load, "empty_file") - assert_raises(YTOutputNotIdentified, load, "empty_directory") - def test_dimensionless_field_units(): Z = np.random.uniform(size=(32, 32, 32)) diff --git a/yt/tests/test_load_arg_parsing.py b/yt/tests/test_load_arg_parsing.py new file mode 100644 index 00000000000..4eec4f42f9a --- /dev/null +++ b/yt/tests/test_load_arg_parsing.py @@ -0,0 +1,74 @@ +import json +import os +from pathlib import Path + +import pytest + +from yt.convenience import _sanitize_load_args, load +from yt.funcs import ensure_list +from yt.utilities.exceptions import YTOutputNotIdentified + + +def test_sanitize_empty(): + sargs = _sanitize_load_args() + assert sargs == [] + + +@pytest.mark.parametrize("obj", [None, 123, 2.0, (None, 123), [None, 123]]) +def test_sanitize_arbitrary_obj(obj): + # check that (nested) objects which are neither pathlike nor iterable are unaltered + args = ensure_list(obj) + sargs = _sanitize_load_args(*args) + + # this is a quick hack to check that results are structuraly equivalent + # if tuples are converted to list or vice versa it's not expected to be an issue + assert json.dumps(sargs) == json.dumps(args) + + +def test_raise_filenotfound(tmpdir): + with pytest.raises(FileNotFoundError): + load(os.path.join(tmpdir, "non_existing_file")) + + +def test_raise_unidentifiedtype(tmpdir): + p = tmpdir.mkdir("sub").join("invalid_data.txt") + p.write("This should never be valid") + with pytest.raises(YTOutputNotIdentified): + load(p) + + +class TestSanitizePathLike: + def test_sanitize_from_simple_path(self): + # check for basic path + p1 = 
os.path.join("not", "a", "real", "datafile.hdf5") + + sp1 = _sanitize_load_args(p1) + + assert type(sp1) is list + assert type(sp1[0]) is str + assert sp1 == _sanitize_load_args(Path(p1)) + + def test_sanitize_from_two_paths(self): + # check for more than one path + p1 = [ + os.path.join("not", "a", "real", "datafile.hdf5"), + os.path.join("not", "real", "either", "datafile.hdf5"), + ] + + sp1 = _sanitize_load_args(*p1) + assert sp1 == p1 + + p2 = [Path(p) for p in p1] + assert sp1 == _sanitize_load_args(*p2) + + def test_sanitize_from_user_path(self): + # check for user "~" card expansion + p1 = os.path.join("~", "not", "a", "real", "datafile.hdf5") + p2 = Path(p1) + assert _sanitize_load_args(p1) == _sanitize_load_args(p2) + + def test_sanitize_from_wildcard_path(self): + # check with wildcards + p1 = os.path.join("not", "a", "real", "directory", "*.hdf5") + p2 = Path(p1) + assert _sanitize_load_args(p1) == _sanitize_load_args(p2) diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py new file mode 100644 index 00000000000..c1811a98752 --- /dev/null +++ b/yt/tests/test_load_errors.py @@ -0,0 +1,43 @@ +import os +import tempfile +from pathlib import Path + +from yt.convenience import load, simulation +from yt.testing import assert_raises +from yt.utilities.exceptions import YTOutputNotIdentified, YTSimulationNotIdentified + + +def test_load_unexisting_data(): + with tempfile.TemporaryDirectory() as tmpdir: + assert_raises(OSError, load, os.path.join(tmpdir, "not_a_file")) + assert_raises(OSError, simulation, os.path.join(tmpdir, "not_a_file"), "Enzo") + + # this one is a design choice: it is preferable to report the most important + # problem in an error message (missing data is worse than a typo in + # simulation_type), so we make sure the error raised is not YTSimulationNotIdentified + assert_raises( + OSError, + simulation, + os.path.join(tmpdir, "not_a_file"), + "unregistered_simulation_type", + ) + + +def test_load_unidentified_data(): + with tempfile.TemporaryDirectory() as tmpdir: + empty_file_path = Path(tmpdir) / "empty_file" + empty_file_path.touch() + assert_raises(YTOutputNotIdentified, load, tmpdir) + assert_raises(YTOutputNotIdentified, load, empty_file_path) + assert_raises( + YTSimulationNotIdentified, + simulation, + tmpdir, + "unregistered_simulation_type", + ) + assert_raises( + YTSimulationNotIdentified, + simulation, + empty_file_path, + "unregistered_simulation_type", + ) diff --git a/yt/tests/test_load_from_path.py b/yt/tests/test_load_from_path.py deleted file mode 100644 index 2391325df37..00000000000 --- a/yt/tests/test_load_from_path.py +++ /dev/null @@ -1,41 +0,0 @@ -import os -from sys import version_info - -import pytest - -from yt.convenience import _sanitize_load_args - -PY36 = version_info >= (3, 6) - -if PY36: - from pathlib import Path - - -@pytest.mark.skipif(not PY36, reason="requires python3.6 or higher") -class TestSanitizeLoadArgs: - def test_sanitize_from_simple_path(self): - # check for basic path - p1 = os.path.join("not", "a", "real", "datafile.hdf5") - p2 = Path(p1) - assert _sanitize_load_args(p1) == _sanitize_load_args(p2) - - def test_sanitize_from_two_paths(self): - # check for more than one path - p1 = [ - os.path.join("not", "a", "real", "datafile.hdf5"), - os.path.join("not", "real", "either", "datafile.hdf5"), - ] - p2 = [Path(p) for p in p1] - assert _sanitize_load_args(*p1) == _sanitize_load_args(*p2) - - def test_sanitize_from_user_path(self): - # check for user "~" card expansion - p1 = os.path.join("~", "not", 
"a", "real", "datafile.hdf5") - p2 = Path(p1) - assert _sanitize_load_args(p1) == _sanitize_load_args(p2) - - def test_sanitize_from_wildcard_path(self): - # check with wildcards - p1 = os.path.join("not", "a", "real", "directory", "*.hdf5") - p2 = Path(p1) - assert _sanitize_load_args(p1) == _sanitize_load_args(p2) From f2aa3655c0ac3a396fe5698afe4bc6a0291a8539 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 28 Jun 2020 12:22:33 +0200 Subject: [PATCH 153/653] refactor: rewrite convenience functions yt.load() and yt.simulation() with flat logic blocks --- yt/convenience.py | 204 +++++++++++-------------- yt/tests/test_load_arg_parsing.py | 74 --------- yt/utilities/parameter_file_storage.py | 27 ---- 3 files changed, 93 insertions(+), 212 deletions(-) delete mode 100644 yt/tests/test_load_arg_parsing.py diff --git a/yt/convenience.py b/yt/convenience.py index 6cdb59310b1..5fcde741259 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -6,132 +6,114 @@ from yt.utilities.exceptions import YTOutputNotIdentified, YTSimulationNotIdentified from yt.utilities.hierarchy_inspection import find_lowest_subclasses from yt.utilities.parameter_file_storage import ( - EnzoRunDatabase, output_type_registry, simulation_time_series_registry, ) -def _sanitize_load_args(*args): - """Filter out non-pathlike arguments, ensure list form, and expand '~' tokens""" - try: - # os.PathLike is python >= 3.6 - path_types = str, os.PathLike - except AttributeError: - path_types = (str,) +def load(fn, *args, **kwargs): + """ + Load a Dataset or DatasetSeries object. + The data format is automatically discovered, and the exact return type is the + corresponding subclass of :class:`yt.data_objects.static_output.Dataset`. + A :class:`yt.data_objects.time_series.DatasetSeries` is created if the first + argument is a pattern. - return [ - os.path.expanduser(arg) if isinstance(arg, path_types) else arg for arg in args - ] + Parameters + ---------- + fn : str, os.Pathlike, or byte (types supported by os.path.expandusers) + A path to the data location. This can be a file name, directory name, a glob + pattern, or a url (for data types that support it). + Additional arguments, if any, are passed down to the return class. -def load(*args, **kwargs): - """ - This function attempts to determine the base data type of a filename or - other set of arguments by calling - :meth:`yt.data_objects.static_output.Dataset._is_valid` until it finds a - match, at which point it returns an instance of the appropriate - :class:`yt.data_objects.static_output.Dataset` subclass. + Returns + ------- + :class:`yt.data_objects.static_output.Dataset` object + If fn is a single path, create a Dataset from the appropriate subclass. + + :class:`yt.data_objects.time_series.DatasetSeries` + If fn is a glob pattern (i.e. containing wildcards '[]?!*'), create a series. + + Raises + ------ + OSError + If fn does not match any existing file or directory. + + yt.utilities.exceptions.YTOutputNotIdentified + If fn matches existing files or directories with undetermined format. 
""" - args = _sanitize_load_args(*args) - candidates = [] - valid_file = [] - for argno, arg in enumerate(args): - if isinstance(arg, str): - if os.path.exists(arg): - valid_file.append(True) - elif arg.startswith("http"): - valid_file.append(True) - else: - if os.path.exists(os.path.join(ytcfg.get("yt", "test_data_dir"), arg)): - valid_file.append(True) - args[argno] = os.path.join(ytcfg.get("yt", "test_data_dir"), arg) - else: - valid_file.append(False) - else: - valid_file.append(False) - types_to_check = output_type_registry - if not any(valid_file): - try: - from yt.data_objects.time_series import DatasetSeries - - ts = DatasetSeries(*args, **kwargs) - return ts - except (TypeError, OSError, YTOutputNotIdentified): - pass - # We check if either the first argument is a dict or list, in which - # case we try identifying candidates. - if len(args) > 0 and isinstance(args[0], (list, dict)): - # This fixes issues where it is assumed the first argument is a - # file - types_to_check = dict( - (n, v) - for n, v in output_type_registry.items() - if n.startswith("stream_") - ) - # Better way to do this is to override the output_type_registry + fn = os.path.expanduser(fn) + + if any(wildcard in fn for wildcard in "[]?!*"): + from yt.data_objects.time_series import DatasetSeries + + return DatasetSeries(fn, *args, **kwargs) + + if not (os.path.exists(fn) or fn.startswith("http")): + data_dir = ytcfg.get("yt", "test_data_dir") + alt_fn = os.path.join(data_dir, fn) + if os.path.exists(alt_fn): + fn = alt_fn else: - mylog.error("None of the arguments provided to load() is a valid file") - mylog.error("Please check that you have used a correct path") - raise YTOutputNotIdentified(args, kwargs) - for n, c in types_to_check.items(): - if n is None: - continue - if c._is_valid(*args, **kwargs): - candidates.append(n) - - # convert to classes - candidates = [output_type_registry[c] for c in candidates] + msg = "No such file or directory: %s" % fn + if os.path.exists(data_dir): + msg += "\n(Also tried %s)" % alt_fn + raise OSError(msg) + + candidates = [] + for cls in output_type_registry.values(): + if cls._is_valid(fn, *args, **kwargs): + candidates.append(cls) + # Find only the lowest subclasses, i.e. most specialised front ends candidates = find_lowest_subclasses(candidates) + if len(candidates) == 1: - return candidates[0](*args, **kwargs) - if len(candidates) == 0: - if ( - ytcfg.get("yt", "enzo_db") != "" - and len(args) == 1 - and isinstance(args[0], str) - ): - erdb = EnzoRunDatabase() - fn = erdb.find_uuid(args[0]) - n = "EnzoDataset" - if n in output_type_registry and output_type_registry[n]._is_valid(fn): - return output_type_registry[n](fn) - mylog.error("Couldn't figure out output type for %s", args[0]) - raise YTOutputNotIdentified(args, kwargs) - - mylog.error("Multiple output type candidates for %s:", args[0]) - for c in candidates: - mylog.error(" Possible: %s", c) - raise YTOutputNotIdentified(args, kwargs) - - -def simulation(parameter_filename, simulation_type, find_outputs=False): + return candidates[0](fn, *args, **kwargs) + + if len(candidates) > 1: + mylog.error("Multiple output type candidates for %s:", fn) + for c in candidates: + mylog.error(" Possible: %s", c) + + raise YTOutputNotIdentified([fn, *args], kwargs) + + +def simulation(fn, simulation_type, find_outputs=False): """ - Loads a simulation time series object of the specified - simulation type. + Load a simulation time series object of the specified simulation type. 
+ + Parameters + ---------- + fn : str, os.Pathlike, or byte (types supported by os.path.expandusers) + Name of the data file or directory. + + simulation_type : str + E.g. 'Enzo' + + find_outputs : bool + Defaults to False + + Raises + ------ + OSError + If fn is not found. + + yt.utilities.exceptions.YTSimulationNotIdentified + If simulation_type is unknown. """ - if simulation_type not in simulation_time_series_registry: + if not os.path.exists(fn): + alt_fn = os.path.join(ytcfg.get("yt", "test_data_dir"), fn) + if os.path.exists(alt_fn): + fn = alt_fn + else: + raise OSError("No such file or directory: %s" % fn) + + try: + cls = simulation_time_series_registry[simulation_type] + except KeyError: raise YTSimulationNotIdentified(simulation_type) - if os.path.exists(parameter_filename): - valid_file = True - elif os.path.exists( - os.path.join(ytcfg.get("yt", "test_data_dir"), parameter_filename) - ): - parameter_filename = os.path.join( - ytcfg.get("yt", "test_data_dir"), parameter_filename - ) - valid_file = True - else: - valid_file = False - - if not valid_file: - raise YTOutputNotIdentified( - (parameter_filename, simulation_type), dict(find_outputs=find_outputs) - ) - - return simulation_time_series_registry[simulation_type]( - parameter_filename, find_outputs=find_outputs - ) + return cls(fn, find_outputs=find_outputs) diff --git a/yt/tests/test_load_arg_parsing.py b/yt/tests/test_load_arg_parsing.py deleted file mode 100644 index 4eec4f42f9a..00000000000 --- a/yt/tests/test_load_arg_parsing.py +++ /dev/null @@ -1,74 +0,0 @@ -import json -import os -from pathlib import Path - -import pytest - -from yt.convenience import _sanitize_load_args, load -from yt.funcs import ensure_list -from yt.utilities.exceptions import YTOutputNotIdentified - - -def test_sanitize_empty(): - sargs = _sanitize_load_args() - assert sargs == [] - - -@pytest.mark.parametrize("obj", [None, 123, 2.0, (None, 123), [None, 123]]) -def test_sanitize_arbitrary_obj(obj): - # check that (nested) objects which are neither pathlike nor iterable are unaltered - args = ensure_list(obj) - sargs = _sanitize_load_args(*args) - - # this is a quick hack to check that results are structuraly equivalent - # if tuples are converted to list or vice versa it's not expected to be an issue - assert json.dumps(sargs) == json.dumps(args) - - -def test_raise_filenotfound(tmpdir): - with pytest.raises(FileNotFoundError): - load(os.path.join(tmpdir, "non_existing_file")) - - -def test_raise_unidentifiedtype(tmpdir): - p = tmpdir.mkdir("sub").join("invalid_data.txt") - p.write("This should never be valid") - with pytest.raises(YTOutputNotIdentified): - load(p) - - -class TestSanitizePathLike: - def test_sanitize_from_simple_path(self): - # check for basic path - p1 = os.path.join("not", "a", "real", "datafile.hdf5") - - sp1 = _sanitize_load_args(p1) - - assert type(sp1) is list - assert type(sp1[0]) is str - assert sp1 == _sanitize_load_args(Path(p1)) - - def test_sanitize_from_two_paths(self): - # check for more than one path - p1 = [ - os.path.join("not", "a", "real", "datafile.hdf5"), - os.path.join("not", "real", "either", "datafile.hdf5"), - ] - - sp1 = _sanitize_load_args(*p1) - assert sp1 == p1 - - p2 = [Path(p) for p in p1] - assert sp1 == _sanitize_load_args(*p2) - - def test_sanitize_from_user_path(self): - # check for user "~" card expansion - p1 = os.path.join("~", "not", "a", "real", "datafile.hdf5") - p2 = Path(p1) - assert _sanitize_load_args(p1) == _sanitize_load_args(p2) - - def test_sanitize_from_wildcard_path(self): - 
# check with wildcards - p1 = os.path.join("not", "a", "real", "directory", "*.hdf5") - p2 = Path(p1) - assert _sanitize_load_args(p1) == _sanitize_load_args(p2) diff --git a/yt/utilities/parameter_file_storage.py b/yt/utilities/parameter_file_storage.py index 9ea97c65433..db8a6f7e41a 100644 --- a/yt/utilities/parameter_file_storage.py +++ b/yt/utilities/parameter_file_storage.py @@ -198,30 +198,3 @@ def read_db(self): else: v["last_seen"] = float(v["last_seen"]) return db - - -class ObjectStorage: - pass - - -class EnzoRunDatabase: - conn = None - - def __init__(self, path=None): - if path is None: - path = ytcfg.get("yt", "enzo_db") - if len(path) == 0: - raise RuntimeError - import sqlite3 - - self.conn = sqlite3.connect(path) - - def find_uuid(self, u): - cursor = self.conn.execute( - "select ds_path from enzo_outputs where dset_uuid = '%s'" % (u) - ) - # It's a 'unique key' - result = cursor.fetchone() - if result is None: - return None - return result[0] From e424c1329534d8664dcdc14c7130f2227aab79a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 28 Jun 2020 12:34:30 +0200 Subject: [PATCH 154/653] fix: adapt pre-existing try-blocks to new errors --- yt/frontends/enzo/simulation_handling.py | 24 +++++++++---------- yt/frontends/exodus_ii/simulation_handling.py | 19 +++++++-------- yt/frontends/gadget/simulation_handling.py | 24 +++++++++---------- yt/utilities/answer_testing/framework.py | 4 ++-- yt/utilities/answer_testing/utils.py | 4 ++-- 5 files changed, 36 insertions(+), 39 deletions(-) diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 73418189bf2..c8c929388c4 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -656,18 +656,18 @@ def _check_for_outputs(self, potential_outputs): "%s%s" % (dir_key, index), "%s%s" % (output_key, index), ) - if os.path.exists(filename): - try: - ds = load(filename) - if ds is not None: - my_storage.result = { - "filename": filename, - "time": ds.current_time.in_units("s"), - } - if ds.cosmological_simulation: - my_storage.result["redshift"] = ds.current_redshift - except YTOutputNotIdentified: - mylog.error("Failed to load %s", filename) + try: + ds = load(filename) + my_storage.result = { + "filename": filename, + "time": ds.current_time.in_units("s"), + } + if ds.cosmological_simulation: + my_storage.result["redshift"] = ds.current_redshift + except OSError: + pass + except YTOutputNotIdentified: + mylog.error("Failed to load %s", filename) mylog.setLevel(llevel) my_outputs = [ my_output for my_output in my_outputs.values() if my_output is not None diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index 874208dcd1e..3778d609828 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -1,12 +1,10 @@ import glob -import os - from yt.convenience import load -from yt.data_objects.time_series import DatasetSeries, RegisteredSimulationTimeSeries from yt.funcs import only_on_root from yt.utilities.exceptions import YTOutputNotIdentified from yt.utilities.logger import ytLogger as mylog from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects +from yt.data_objects.time_series import DatasetSeries, RegisteredSimulationTimeSeries class ExodusIISimulation(DatasetSeries, metaclass=RegisteredSimulationTimeSeries): @@ -90,14 +88,13 @@ def _check_for_outputs(self, potential_outputs): for 
my_storage, output in parallel_objects( potential_outputs, storage=my_outputs ): - if os.path.exists(output): - try: - ds = load(output) - if ds is not None: - num_steps = ds.num_steps - my_storage.result = {"filename": output, "num_steps": num_steps} - except YTOutputNotIdentified: - mylog.error("Failed to load %s", output) + try: + ds = load(output) + my_storage.result = {"filename": output, "num_steps": ds.num_steps} + except OSError: + pass + except YTOutputNotIdentified: + mylog.error("Failed to load %s", output) my_outputs = [ my_output for my_output in my_outputs.values() if my_output is not None ] diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 6ce53f2ddd7..8ebf282a232 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -521,18 +521,18 @@ def _check_for_outputs(self, potential_outputs): for my_storage, output in parallel_objects( potential_outputs, storage=my_outputs ): - if os.path.exists(output): - try: - ds = load(output) - if ds is not None: - my_storage.result = { - "filename": output, - "time": ds.current_time.in_units("s"), - } - if ds.cosmological_simulation: - my_storage.result["redshift"] = ds.current_redshift - except YTOutputNotIdentified: - mylog.error("Failed to load %s", output) + try: + ds = load(output) + my_storage.result = { + "filename": output, + "time": ds.current_time.in_units("s"), + } + if ds.cosmological_simulation: + my_storage.result["redshift"] = ds.current_redshift + except OSError: + pass + except YTOutputNotIdentified: + mylog.error("Failed to load %s", output) my_outputs = [ my_output for my_output in my_outputs.values() if my_output is not None ] diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index d5725392a56..33379e0dade 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -307,7 +307,7 @@ def can_run_ds(ds_fn, file_check=False): return os.path.isfile(os.path.join(path, ds_fn)) and result_storage is not None try: load(ds_fn) - except YTOutputNotIdentified: + except (OSError, YTOutputNotIdentified): if ytcfg.getboolean("yt", "requires_ds_strict"): if result_storage is not None: result_storage["tainted"] = True @@ -327,7 +327,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): return os.path.isfile(os.path.join(path, sim_fn)) and result_storage is not None try: simulation(sim_fn, sim_type) - except YTOutputNotIdentified: + except (OSError, YTOutputNotIdentified): if ytcfg.getboolean("yt", "requires_ds_strict"): if result_storage is not None: result_storage["tainted"] = True diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index f280751101c..567a6d2c810 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -310,7 +310,7 @@ def can_run_ds(ds_fn, file_check=False): try: load(ds_fn) return True - except YTOutputNotIdentified: + except (OSError, YTOutputNotIdentified): return False @@ -326,7 +326,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): return os.path.isfile(os.path.join(path, sim_fn)) try: simulation(sim_fn, sim_type) - except YTOutputNotIdentified: + except (OSError, YTOutputNotIdentified): return False return True From 4627d5c37c0b433f4e18b4b155ac4ca88af08de5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 23 Jul 2020 09:09:03 +0200 Subject: [PATCH 155/653] isort happy --- 
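# A condensed sketch (not taken from any hunk above) of the calling pattern the
# simulation handlers now share: a missing path surfaces as OSError, an existing
# but unrecognised path as YTOutputNotIdentified.
from yt.convenience import load
from yt.utilities.exceptions import YTOutputNotIdentified

def try_load(fn):
    try:
        return load(fn)
    except OSError:
        return None  # path does not exist; replaces the old os.path.exists() guard
    except YTOutputNotIdentified:
        return None  # path exists but no frontend recognised the format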
yt/frontends/exodus_ii/simulation_handling.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index 3778d609828..88bd41b1c06 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -1,10 +1,11 @@ import glob + from yt.convenience import load +from yt.data_objects.time_series import DatasetSeries, RegisteredSimulationTimeSeries from yt.funcs import only_on_root from yt.utilities.exceptions import YTOutputNotIdentified from yt.utilities.logger import ytLogger as mylog from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects -from yt.data_objects.time_series import DatasetSeries, RegisteredSimulationTimeSeries class ExodusIISimulation(DatasetSeries, metaclass=RegisteredSimulationTimeSeries): From c9f644846054f821e6a7bb19c347143d710f103c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 26 Jun 2020 23:03:19 +0200 Subject: [PATCH 156/653] refactor: rephrase YTOutputNotIdentified error message, only print args and kwargs in case something is actually passed --- yt/convenience.py | 2 +- yt/data_objects/time_series.py | 2 +- yt/utilities/exceptions.py | 10 ++++++++-- 3 files changed, 10 insertions(+), 4 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index 5fcde741259..1f5bd4ad421 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -77,7 +77,7 @@ def load(fn, *args, **kwargs): for c in candidates: mylog.error(" Possible: %s", c) - raise YTOutputNotIdentified([fn, *args], kwargs) + raise YTOutputNotIdentified(fn, args, kwargs) def simulation(fn, simulation_type, find_outputs=False): diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 1cd50b8b63d..6660b175cd6 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -163,7 +163,7 @@ def __new__(cls, outputs, *args, **kwargs): try: ret._pre_outputs = outputs[:] except TypeError: - raise YTOutputNotIdentified(outputs, {}) + raise YTOutputNotIdentified(outputs) return ret def __init__( diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index cc0d68e5c9e..6d582da0df7 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -14,12 +14,18 @@ def __init__(self, message=None, ds=None): class YTOutputNotIdentified(YTException): - def __init__(self, args, kwargs): + def __init__(self, filename, args=None, kwargs=None): + self.filename = filename self.args = args self.kwargs = kwargs def __str__(self): - return "Supplied %s %s, but could not load!" 
% (self.args, self.kwargs) + msg = "Could not determine input format from %s" % self.filename + if self.args is not None: + msg += ", %s" % self.args + if self.kwargs is not None: + msg += ", %s" % self.kwargs + return msg class YTSphereTooSmall(YTException): From 4ab9621868b31fbce3a06dc084dab35b52299689 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 23 Jul 2020 19:13:17 +0200 Subject: [PATCH 157/653] [ci skip] reorder pre-commit hooks so that flake8 only complains AFTER black and isort did their parts, also harmonize PR template with pre-commit hooks order --- .github/PULL_REQUEST_TEMPLATE.md | 4 ++-- .pre-commit-config.yaml | 14 +++++++------- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index f8f443bf401..997c86e4bba 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -20,9 +20,9 @@ detail. Why is this change required? What problem does it solve?--> -- [ ] pass `flake8 yt/` -- [ ] pass `isort . --check --diff` - [ ] pass `black --check yt/` +- [ ] pass `isort . --check --diff` +- [ ] pass `flake8 yt/` - [ ] New features are documented, with docstrings and narrative docs - [ ] Adds a test for any bugs fixed. Adds tests for new features. diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index c5648aa48aa..aa45186745d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,13 +1,13 @@ -- repo: https://gitlab.com/pycqa/flake8 - rev: '' +- repo: https://github.com/ambv/black + rev: 19.10b0 hooks: - - id: flake8 + - id: black + language_version: python3.7 - repo: https://github.com/timothycrosley/isort rev: '' hooks: - id: isort -- repo: https://github.com/ambv/black - rev: 19.10b0 +- repo: https://gitlab.com/pycqa/flake8 + rev: '' hooks: - - id: black - language_version: python3.7 + - id: flake8 From 02d2945291edc9977743c5b4727d3de4543fd2fb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 26 Jul 2020 16:43:58 +0200 Subject: [PATCH 158/653] pin flake8 and isort version in precommit hooks to match lint_requirements.txt --- .pre-commit-config.yaml | 4 ++-- tests/lint_requirements.txt | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index aa45186745d..5a15a4e195a 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,10 +4,10 @@ - id: black language_version: python3.7 - repo: https://github.com/timothycrosley/isort - rev: '' + rev: '5.1.4' # keep in sync with tests/lint_requirements.txt hooks: - id: isort - repo: https://gitlab.com/pycqa/flake8 - rev: '' + rev: '3.8.1' # keep in sync with tests/lint_requirements.txt hooks: - id: flake8 diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index 52a3fd3b476..d178db40118 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -1,7 +1,7 @@ -flake8==3.8.1 +flake8==3.8.1 # keep in sync with .pre-commit-config.yaml mccabe==0.6.1 pycodestyle==2.6.0 pyflakes==2.2.0 -isort~=5.1 +isort==5.1.4 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear From 42bea5e759a64b71b06b3cef5d2d32da8777fb2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 27 Jul 2020 11:23:23 +0200 Subject: [PATCH 159/653] upgrade pinned isort version (doesn't change a line in the code base) --- .pre-commit-config.yaml | 2 +- tests/lint_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff 
--git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 5a15a4e195a..1419abcb0b9 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ - id: black language_version: python3.7 - repo: https://github.com/timothycrosley/isort - rev: '5.1.4' # keep in sync with tests/lint_requirements.txt + rev: '5.2.0' # keep in sync with tests/lint_requirements.txt hooks: - id: isort - repo: https://gitlab.com/pycqa/flake8 diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index d178db40118..f5f3dbae54c 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -2,6 +2,6 @@ flake8==3.8.1 # keep in sync with .pre-commit-config.yaml mccabe==0.6.1 pycodestyle==2.6.0 pyflakes==2.2.0 -isort==5.1.4 # keep in sync with .pre-commit-config.yaml +isort==5.2.0 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear From 9cebcbbdf7231a22e56ac01726c9ccdd2d120d7d Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Mon, 27 Jul 2020 14:28:08 -0500 Subject: [PATCH 160/653] adding example slash command --- .github/workflows/slash-command-dispatch.yml | 14 ++++++++++++++ 1 file changed, 14 insertions(+) create mode 100644 .github/workflows/slash-command-dispatch.yml diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml new file mode 100644 index 00000000000..f165c98827b --- /dev/null +++ b/.github/workflows/slash-command-dispatch.yml @@ -0,0 +1,14 @@ +name: Slash Command Dispatch +on: + issue_comment: + types: [created] +jobs: + slashCommandDispatch: + runs-on: ubuntu-latest + steps: + - name: Slash Command Dispatch + uses: peter-evans/slash-command-dispatch@v2 + with: + token: ${{ secrets.PAT }} + commands: example + repository: matthewturk/slash-command-processor From e5ade092cfbfe6f97249e3fb40cd118208d795b2 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Mon, 27 Jul 2020 14:40:13 -0500 Subject: [PATCH 161/653] register some more commands --- .github/workflows/slash-command-dispatch.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml index f165c98827b..e358fe0cc61 100644 --- a/.github/workflows/slash-command-dispatch.yml +++ b/.github/workflows/slash-command-dispatch.yml @@ -10,5 +10,8 @@ jobs: uses: peter-evans/slash-command-dispatch@v2 with: token: ${{ secrets.PAT }} - commands: example + commands: | + example + black + rebase repository: matthewturk/slash-command-processor From 1b44e3fede0a14da57e8876bd546a0634ae8a563 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Tue, 28 Jul 2020 08:24:57 -0500 Subject: [PATCH 162/653] add format and help, remove example --- .github/workflows/slash-command-dispatch.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml index e358fe0cc61..d6e4d34420c 100644 --- a/.github/workflows/slash-command-dispatch.yml +++ b/.github/workflows/slash-command-dispatch.yml @@ -11,7 +11,7 @@ jobs: with: token: ${{ secrets.PAT }} commands: | - example - black + help + format rebase repository: matthewturk/slash-command-processor From 5497e835472f34cd4dd5467557a7c8eb832e18b5 Mon Sep 17 00:00:00 2001 From: Michael Zingale Date: Tue, 28 Jul 2020 11:01:11 -0400 Subject: [PATCH 163/653] apply flake8, isort, and black corrections --- yt/visualization/plot_window.py | 37 ++++++++++++++++++++------------- 1 file changed, 23 insertions(+), 14 
deletions(-) diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index f92c00b7aee..44976a3c2c8 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -6,8 +6,8 @@ import matplotlib import matplotlib.pyplot as plt -from mpl_toolkits.axes_grid1 import ImageGrid import numpy as np +from mpl_toolkits.axes_grid1 import ImageGrid from unyt.exceptions import UnitConversionError from yt.data_objects.image_array import ImageArray @@ -1281,11 +1281,16 @@ def run_callbacks(self): if key not in keys: del self.frb[key] - - def export_to_mpl_figure(self, nrows_ncols, axes_pad=1.0, - label_mode="L", - cbar_location="right", cbar_size="5%", - cbar_mode="each", cbar_pad="0%"): + def export_to_mpl_figure( + self, + nrows_ncols, + axes_pad=1.0, + label_mode="L", + cbar_location="right", + cbar_size="5%", + cbar_mode="each", + cbar_pad="0%", + ): r""" Creates a matplotlib figure object with the specified axes arrangement, nrows_ncols, and maps the underlying figures to the matplotlib axes. Note that all of these @@ -1331,14 +1336,17 @@ def export_to_mpl_figure(self, nrows_ncols, axes_pad=1.0, """ fig = plt.figure() - grid = ImageGrid(fig, 111, - nrows_ncols=nrows_ncols, - axes_pad=axes_pad, - label_mode=label_mode, - cbar_location=cbar_location, - cbar_size=cbar_size, - cbar_mode=cbar_mode, - cbar_pad=cbar_pad) + grid = ImageGrid( + fig, + 111, + nrows_ncols=nrows_ncols, + axes_pad=axes_pad, + label_mode=label_mode, + cbar_location=cbar_location, + cbar_size=cbar_size, + cbar_mode=cbar_mode, + cbar_pad=cbar_pad, + ) fields = self.fields if len(fields) > len(grid): @@ -1354,6 +1362,7 @@ def export_to_mpl_figure(self, nrows_ncols, axes_pad=1.0, return fig + class AxisAlignedSlicePlot(PWViewerMPL): r"""Creates a slice plot from a dataset From 64de0067349724ce083f0ef313e09d11a6f9e571 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 25 Jul 2020 08:40:40 +0200 Subject: [PATCH 164/653] remove poster module since its functionalities are covered by requests --- .coveragerc | 1 - .pep8speaks.yml | 1 - setup.cfg | 1 - yt/utilities/minimal_representation.py | 10 - yt/utilities/poster/README | 4 - yt/utilities/poster/__init__.py | 32 -- yt/utilities/poster/encode.py | 454 ------------------------- yt/utilities/poster/streaminghttp.py | 222 ------------ 8 files changed, 725 deletions(-) delete mode 100644 yt/utilities/poster/README delete mode 100644 yt/utilities/poster/__init__.py delete mode 100644 yt/utilities/poster/encode.py delete mode 100644 yt/utilities/poster/streaminghttp.py diff --git a/.coveragerc b/.coveragerc index 2dea13250ef..597b2a5fb90 100644 --- a/.coveragerc +++ b/.coveragerc @@ -13,7 +13,6 @@ omit=*.yml yt/mods.py yt/utilities/fits_image.py yt/utilities/lodgeit.py - yt/utilities/poster/* yt/visualization/_mpl_imports.py ignore_errors = True diff --git a/.pep8speaks.yml b/.pep8speaks.yml index 3538cb33772..d26d0d164d0 100644 --- a/.pep8speaks.yml +++ b/.pep8speaks.yml @@ -75,7 +75,6 @@ pycodestyle: - \*/__config__.py - yt/visualization/_mpl_imports.py - yt/utilities/lodgeit.py - - yt/utilities/poster/\* - yt/extern/\* - yt/mods.py - yt/utilities/fits_image.py diff --git a/setup.cfg b/setup.cfg index 2cedac43241..3b6f6c7aeb0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -16,7 +16,6 @@ exclude = doc, */api.py, # avoid spurious "unused import" */__init__.py, # avoid spurious "unused import" */__config__.py, # autogenerated - yt/utilities/poster, yt/extern, # vendored libraries yt/units, # wrapper around unyt, avoid spurious 
"unused import" yt/frontends/stream/sample_data, # autogenerated diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index 2d08bb73b54..7834c42d706 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -16,16 +16,6 @@ from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5 -if sys.version_info < (3, 0): - from .poster.encode import multipart_encode - from .poster.streaminghttp import register_openers - - register_openers() -else: - # We don't yet have a solution for this, but it won't show up very often - # anyway. - pass - def _sanitize_list(flist): temp = [] diff --git a/yt/utilities/poster/README b/yt/utilities/poster/README deleted file mode 100644 index 07b34858900..00000000000 --- a/yt/utilities/poster/README +++ /dev/null @@ -1,4 +0,0 @@ -Poster is a module by Chris AtLee, licensed under the MIT License, included -here. For more information, see the poster home page: - -http://atlee.ca/software/poster diff --git a/yt/utilities/poster/__init__.py b/yt/utilities/poster/__init__.py deleted file mode 100644 index bfc91acc967..00000000000 --- a/yt/utilities/poster/__init__.py +++ /dev/null @@ -1,32 +0,0 @@ -# Copyright (c) 2011 Chris AtLee -# -# Permission is hereby granted, free of charge, to any person obtaining a copy -# of this software and associated documentation files (the "Software"), to deal -# in the Software without restriction, including without limitation the rights -# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -# copies of the Software, and to permit persons to whom the Software is -# furnished to do so, subject to the following conditions: -# -# The above copyright notice and this permission notice shall be included in -# all copies or substantial portions of the Software. -# -# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -# THE SOFTWARE. -"""poster module - -Support for streaming HTTP uploads, and multipart/form-data encoding - -```poster.version``` is a 3-tuple of integers representing the version number. -New releases of poster will always have a version number that compares greater -than an older version of poster. -New in version 0.6.""" - - -from . import encode, streaminghttp - -version = (0, 8, 1) # Thanks JP! diff --git a/yt/utilities/poster/encode.py b/yt/utilities/poster/encode.py deleted file mode 100644 index 9049bde0257..00000000000 --- a/yt/utilities/poster/encode.py +++ /dev/null @@ -1,454 +0,0 @@ -"""multipart/form-data encoding module - -This module provides functions that facilitate encoding name/value pairs -as multipart/form-data suitable for a HTTP POST or PUT request. 
- -multipart/form-data is the standard way to upload files over HTTP""" - -__all__ = [ - "gen_boundary", - "encode_and_quote", - "MultipartParam", - "encode_string", - "encode_file_header", - "get_body_size", - "get_headers", - "multipart_encode", -] - -try: - import uuid - - def gen_boundary(): - """Returns a random string to use as the boundary for a message""" - return uuid.uuid4().hex - - -except ImportError: - import random - - import sha - - def gen_boundary(): - """Returns a random string to use as the boundary for a message""" - bits = random.getrandbits(160) - return sha.new(str(bits)).hexdigest() - - -import mimetypes -import os -import re -import urllib - -try: - from email.header import Header -except ImportError: - # Python 2.4 - from email.Header import Header - - -def encode_and_quote(data): - """If ``data`` is unicode, return urllib.quote_plus(data.encode("utf-8")) - otherwise return urllib.quote_plus(data)""" - if data is None: - return None - - if isinstance(data, str): - data = data.encode("utf-8") - return urllib.quote_plus(data) - - -def _strify(s): - """If s is a unicode string, encode it to UTF-8 and return the results, - otherwise return str(s), or None if s is None""" - if s is None: - return None - if isinstance(s, str): - return s.encode("utf-8") - return str(s) - - -class MultipartParam: - """Represents a single parameter in a multipart/form-data request - - ``name`` is the name of this parameter. - - If ``value`` is set, it must be a string or unicode object to use as the - data for this parameter. - - If ``filename`` is set, it is what to say that this parameter's filename - is. Note that this does not have to be the actual filename any local file. - - If ``filetype`` is set, it is used as the Content-Type for this parameter. - If unset it defaults to "text/plain; charset=utf8" - - If ``filesize`` is set, it specifies the length of the file ``fileobj`` - - If ``fileobj`` is set, it must be a file-like object that supports - .read(). - - Both ``value`` and ``fileobj`` must not be set, doing so will - raise a ValueError assertion. - - If ``fileobj`` is set, and ``filesize`` is not specified, then - the file's size will be determined first by stat'ing ``fileobj``'s - file descriptor, and if that fails, by seeking to the end of the file, - recording the current position as the size, and then by seeking back to the - beginning of the file. - - ``cb`` is a callable which will be called from iter_encode with (self, - current, total), representing the current parameter, current amount - transferred, and the total size. 
- """ - - def __init__( - self, - name, - value=None, - filename=None, - filetype=None, - filesize=None, - fileobj=None, - cb=None, - ): - self.name = Header(name).encode() - self.value = _strify(value) - if filename is None: - self.filename = None - else: - if isinstance(filename, str): - # Encode with XML entities - self.filename = filename.encode("ascii", "xmlcharrefreplace") - else: - self.filename = str(filename) - self.filename = self.filename.encode("string_escape").replace('"', '\\"') - self.filetype = _strify(filetype) - - self.filesize = filesize - self.fileobj = fileobj - self.cb = cb - - if self.value is not None and self.fileobj is not None: - raise ValueError("Only one of value or fileobj may be specified") - - if fileobj is not None and filesize is None: - # Try and determine the file size - try: - self.filesize = os.fstat(fileobj.fileno()).st_size - except (OSError, AttributeError): - try: - fileobj.seek(0, 2) - self.filesize = fileobj.tell() - fileobj.seek(0) - except Exception: - raise ValueError("Could not determine filesize") - - def __lt__(self, other): - attrs = ["name", "value", "filename", "filetype", "filesize", "fileobj"] - myattrs = [getattr(self, a) for a in attrs] - oattrs = [getattr(other, a) for a in attrs] - return myattrs < oattrs - - def reset(self): - if self.fileobj is not None: - self.fileobj.seek(0) - elif self.value is None: - raise ValueError("Don't know how to reset this parameter") - - @classmethod - def from_file(cls, paramname, filename): - """Returns a new MultipartParam object constructed from the local - file at ``filename``. - - ``filesize`` is determined by os.path.getsize(``filename``) - - ``filetype`` is determined by mimetypes.guess_type(``filename``)[0] - - ``filename`` is set to os.path.basename(``filename``) - """ - - return cls( - paramname, - filename=os.path.basename(filename), - filetype=mimetypes.guess_type(filename)[0], - filesize=os.path.getsize(filename), - fileobj=open(filename, "rb"), - ) - - @classmethod - def from_params(cls, params): - """Returns a list of MultipartParam objects from a sequence of - name, value pairs, MultipartParam instances, - or from a mapping of names to values - - The values may be strings or file objects, or MultipartParam objects. 
- MultipartParam object names must match the given names in the - name,value pairs or mapping, if applicable.""" - if hasattr(params, "items"): - params = params.items() - - retval = [] - for item in params: - if isinstance(item, cls): - retval.append(item) - continue - name, value = item - if isinstance(value, cls): - assert value.name == name - retval.append(value) - continue - if hasattr(value, "read"): - # Looks like a file object - filename = getattr(value, "name", None) - if filename is not None: - filetype = mimetypes.guess_type(filename)[0] - else: - filetype = None - - retval.append( - cls(name=name, filename=filename, filetype=filetype, fileobj=value) - ) - else: - retval.append(cls(name, value)) - return retval - - def encode_hdr(self, boundary): - """Returns the header of the encoding of this parameter""" - boundary = encode_and_quote(boundary) - - headers = ["--%s" % boundary] - - if self.filename: - disposition = 'form-data; name="%s"; filename="%s"' % ( - self.name, - self.filename, - ) - else: - disposition = 'form-data; name="%s"' % self.name - - headers.append("Content-Disposition: %s" % disposition) - - if self.filetype: - filetype = self.filetype - else: - filetype = "text/plain; charset=utf-8" - - headers.append("Content-Type: %s" % filetype) - - headers.append("") - headers.append("") - - return "\r\n".join(headers) - - def encode(self, boundary): - """Returns the string encoding of this parameter""" - if self.value is None: - value = self.fileobj.read() - else: - value = self.value - - if re.search("^--%s$" % re.escape(boundary), value, re.M): - raise ValueError("boundary found in encoded string") - - return "%s%s\r\n" % (self.encode_hdr(boundary), value) - - def iter_encode(self, boundary, blocksize=4096): - """Yields the encoding of this parameter - If self.fileobj is set, then blocks of ``blocksize`` bytes are read and - yielded.""" - total = self.get_size(boundary) - current = 0 - if self.value is not None: - block = self.encode(boundary) - current += len(block) - yield block - if self.cb: - self.cb(self, current, total) - else: - block = self.encode_hdr(boundary) - current += len(block) - yield block - if self.cb: - self.cb(self, current, total) - last_block = "" - encoded_boundary = "--%s" % encode_and_quote(boundary) - boundary_exp = re.compile("^%s$" % re.escape(encoded_boundary), re.M) - while True: - block = self.fileobj.read(blocksize) - if not block: - current += 2 - yield "\r\n" - if self.cb: - self.cb(self, current, total) - break - last_block += block - if boundary_exp.search(last_block): - raise ValueError("boundary found in file data") - last_block = last_block[-len(encoded_boundary) - 2 :] - current += len(block) - yield block - if self.cb: - self.cb(self, current, total) - - def get_size(self, boundary): - """Returns the size in bytes that this param will be when encoded - with the given boundary.""" - if self.filesize is not None: - valuesize = self.filesize - else: - valuesize = len(self.value) - - return len(self.encode_hdr(boundary)) + 2 + valuesize - - -def encode_string(boundary, name, value): - """Returns ``name`` and ``value`` encoded as a multipart/form-data - variable. ``boundary`` is the boundary string used throughout - a single request to separate variables.""" - - return MultipartParam(name, value).encode(boundary) - - -def encode_file_header(boundary, paramname, filesize, filename=None, filetype=None): - """Returns the leading data for a multipart/form-data field that contains - file data. 
- - ``boundary`` is the boundary string used throughout a single request to - separate variables. - - ``paramname`` is the name of the variable in this request. - - ``filesize`` is the size of the file data. - - ``filename`` if specified is the filename to give to this field. This - field is only useful to the server for determining the original filename. - - ``filetype`` if specified is the MIME type of this file. - - The actual file data should be sent after this header has been sent. - """ - - return MultipartParam( - paramname, filesize=filesize, filename=filename, filetype=filetype - ).encode_hdr(boundary) - - -def get_body_size(params, boundary): - """Returns the number of bytes that the multipart/form-data encoding - of ``params`` will be.""" - size = sum(p.get_size(boundary) for p in MultipartParam.from_params(params)) - return size + len(boundary) + 6 - - -def get_headers(params, boundary): - """Returns a dictionary with Content-Type and Content-Length headers - for the multipart/form-data encoding of ``params``.""" - headers = {} - boundary = urllib.quote_plus(boundary) - headers["Content-Type"] = "multipart/form-data; boundary=%s" % boundary - headers["Content-Length"] = str(get_body_size(params, boundary)) - return headers - - -class multipart_yielder: - def __init__(self, params, boundary, cb): - self.params = params - self.boundary = boundary - self.cb = cb - - self.i = 0 - self.p = None - self.param_iter = None - self.current = 0 - self.total = get_body_size(params, boundary) - - def __iter__(self): - return self - - def __next__(self): - """generator function to yield multipart/form-data representation - of parameters""" - if self.param_iter is not None: - try: - block = next(self.param_iter) - self.current += len(block) - if self.cb: - self.cb(self.p, self.current, self.total) - return block - except StopIteration: - self.p = None - self.param_iter = None - - if self.i is None: - raise StopIteration - elif self.i >= len(self.params): - self.param_iter = None - self.p = None - self.i = None - block = "--%s--\r\n" % self.boundary - self.current += len(block) - if self.cb: - self.cb(self.p, self.current, self.total) - return block - - self.p = self.params[self.i] - self.param_iter = self.p.iter_encode(self.boundary) - self.i += 1 - return next(self) - - def reset(self): - self.i = 0 - self.current = 0 - for param in self.params: - param.reset() - - -def multipart_encode(params, boundary=None, cb=None): - """Encode ``params`` as multipart/form-data. - - ``params`` should be a sequence of (name, value) pairs or MultipartParam - objects, or a mapping of names to values. - Values are either strings parameter values, or file-like objects to use as - the parameter value. The file-like objects must support .read() and either - .fileno() or both .seek() and .tell(). - - If ``boundary`` is set, then it as used as the MIME boundary. Otherwise - a randomly generated boundary will be used. In either case, if the - boundary string appears in the parameter values a ValueError will be - raised. - - If ``cb`` is set, it should be a callback which will get called as blocks - of data are encoded. It will be called with (param, current, total), - indicating the current parameter being encoded, the current amount encoded, - and the total amount to encode. - - Returns a tuple of `datagen`, `headers`, where `datagen` is a - generator that will yield blocks of data that make up the encoded - parameters, and `headers` is a dictionary with the associated - Content-Type and Content-Length headers. 
- - Examples: - - >>> datagen, headers = multipart_encode( [("key", "value1"), ("key", "value2")] ) - >>> s = "".join(datagen) - >>> assert "value2" in s and "value1" in s - - >>> p = MultipartParam("key", "value2") - >>> datagen, headers = multipart_encode( [("key", "value1"), p] ) - >>> s = "".join(datagen) - >>> assert "value2" in s and "value1" in s - - >>> datagen, headers = multipart_encode( {"key": "value1"} ) - >>> s = "".join(datagen) - >>> assert "value2" not in s and "value1" in s - - """ - if boundary is None: - boundary = gen_boundary() - else: - boundary = urllib.quote_plus(boundary) - - headers = get_headers(params, boundary) - params = MultipartParam.from_params(params) - - return multipart_yielder(params, boundary, cb), headers diff --git a/yt/utilities/poster/streaminghttp.py b/yt/utilities/poster/streaminghttp.py deleted file mode 100644 index 24443094017..00000000000 --- a/yt/utilities/poster/streaminghttp.py +++ /dev/null @@ -1,222 +0,0 @@ -"""Streaming HTTP uploads module. - -This module extends the standard httplib and urllib2 objects so that -iterable objects can be used in the body of HTTP requests. - -In most cases all one should have to do is call :func:`register_openers()` -to register the new streaming http handlers which will take priority over -the default handlers, and then you can use iterable objects in the body -of HTTP requests. - -**N.B.** You must specify a Content-Length header if using an iterable object -since there is no way to determine in advance the total size that will be -yielded, and there is no way to reset an iterator. - -Example usage: - ->>> from StringIO import StringIO ->>> import urllib2, poster.streaminghttp - ->>> opener = poster.streaminghttp.register_openers() - ->>> s = "Test file data" ->>> f = StringIO(s) - ->>> req = urllib2.Request("http://localhost:5000", f, -... {'Content-Length': str(len(s))}) -""" - - -import http.client as http_client -import socket -import sys -import urllib - -__all__ = [ - "StreamingHTTPConnection", - "StreamingHTTPRedirectHandler", - "StreamingHTTPHandler", - "register_openers", -] - -if hasattr(http_client, "HTTPS"): - __all__.extend(["StreamingHTTPSHandler", "StreamingHTTPSConnection"]) - - -class _StreamingHTTPMixin: - """Mixin class for HTTP and HTTPS connections that implements a streaming - send method.""" - - def send(self, value): - """Send ``value`` to the server. - - ``value`` can be a string object, a file-like object that supports - a .read() method, or an iterable object that supports a .next() - method. - """ - # Based on python 2.6's httplib.HTTPConnection.send() - if self.sock is None: - if self.auto_open: - self.connect() - else: - raise http_client.NotConnected() - - # send the data to the server. if we get a broken pipe, then close - # the socket. we want to reconnect when somebody tries to send again. - # - # NOTE: we DO propagate the error, though, because we cannot simply - # ignore the error... the caller will know if they can retry. 
- if self.debuglevel > 0: - print("send:", repr(value)) - try: - blocksize = 8192 - if hasattr(value, "read"): - if hasattr(value, "seek"): - value.seek(0) - if self.debuglevel > 0: - print("sendIng a read()able") - data = value.read(blocksize) - while data: - self.sock.sendall(data) - data = value.read(blocksize) - elif hasattr(value, "next"): - if hasattr(value, "reset"): - value.reset() - if self.debuglevel > 0: - print("sendIng an iterable") - for data in value: - self.sock.sendall(data) - else: - self.sock.sendall(value) - except socket.error as v: - if v[0] == 32: # Broken pipe - self.close() - raise - - -class StreamingHTTPConnection(_StreamingHTTPMixin, http_client.HTTPConnection): - """Subclass of `httplib.HTTPConnection` that overrides the `send()` method - to support iterable body objects""" - - pass - - -class StreamingHTTPRedirectHandler(urllib.request.HTTPRedirectHandler): - """Subclass of `urllib2.HTTPRedirectHandler` that overrides the - `redirect_request` method to properly handle redirected POST requests - - This class is required because python 2.5's HTTPRedirectHandler does - not remove the Content-Type or Content-Length headers when requesting - the new resource, but the body of the original request is not preserved. - """ - - handler_order = urllib.request.HTTPRedirectHandler.handler_order - 1 - - # From python2.6 urllib2's HTTPRedirectHandler - def redirect_request(self, req, fp, code, msg, headers, newurl): - """Return a Request or None in response to a redirect. - - This is called by the http_error_30x methods when a - redirection response is received. If a redirection should - take place, return a new Request to allow http_error_30x to - perform the redirect. Otherwise, raise HTTPError if no-one - else should try to handle this url. Return None if you can't - but another Handler might. - """ - m = req.get_method() - if ( - code in (301, 302, 303, 307) - and m in ("GET", "HEAD") - or code in (301, 302, 303) - and m == "POST" - ): - # Strictly (according to RFC 2616), 301 or 302 in response - # to a POST MUST NOT cause a redirection without confirmation - # from the user (of urllib2, in this case). In practice, - # essentially all clients do redirect in this case, so we - # do the same. - # be conciliant with URIs containing a space - newurl = newurl.replace(" ", "%20") - newheaders = dict( - (k, v) - for k, v in list(req.headers.items()) - if k.lower() not in ("content-length", "content-type") - ) - return urllib.request.Request( - newurl, - headers=newheaders, - origin_req_host=req.get_origin_req_host(), - unverifiable=True, - ) - else: - raise urllib.error.HTTPError(req.get_full_url(), code, msg, headers, fp) - - -class StreamingHTTPHandler(urllib.request.HTTPHandler): - """Subclass of `urllib2.HTTPHandler` that uses - StreamingHTTPConnection as its http connection class.""" - - handler_order = urllib.request.HTTPHandler.handler_order - 1 - - def http_open(self, req): - """Open a StreamingHTTPConnection for the given request""" - return self.do_open(StreamingHTTPConnection, req) - - def http_request(self, req): - """Handle a HTTP request. 
Make sure that Content-Length is specified - if we're using an iterable value""" - # Make sure that if we're using an iterable object as the request - # body, that we've also specified Content-Length - if hasattr(req, "data"): - data = req.data - if hasattr(data, "read") or hasattr(data, "next"): - if not req.has_header("Content-length"): - raise ValueError("No Content-Length specified for iterable body") - return urllib.request.HTTPHandler.do_request_(self, req) - - -if hasattr(http_client, "HTTPS"): - - class StreamingHTTPSConnection(_StreamingHTTPMixin, http_client.HTTPSConnection): - """Subclass of `httplib.HTTSConnection` that overrides the `send()` - method to support iterable body objects""" - - class StreamingHTTPSHandler(urllib.request.HTTPSHandler): - """Subclass of `urllib2.HTTPSHandler` that uses - StreamingHTTPSConnection as its http connection class.""" - - handler_order = urllib.request.HTTPSHandler.handler_order - 1 - - def https_open(self, req): - return self.do_open(StreamingHTTPSConnection, req) - - def https_request(self, req): - # Make sure that if we're using an iterable object as the request - # body, that we've also specified Content-Length - if hasattr(req, "data")(req): - data = req.data - if hasattr(data, "read") or hasattr(data, "next"): - if not req.has_header("Content-length"): - raise ValueError( - "No Content-Length specified for iterable body" - ) - return urllib.request.HTTPSHandler.do_request_(self, req) - - -def get_handlers(): - handlers = [StreamingHTTPHandler, StreamingHTTPRedirectHandler] - if hasattr(http_client, "HTTPS"): - handlers.append(StreamingHTTPSHandler) - return handlers - - -def register_openers(): - """Register the streaming http handlers in the global urllib2 default - opener object. - - Returns the created OpenerDirector object.""" - opener = urllib.request.build_opener(*get_handlers()) - - urllib.request.install_opener(opener) - - return opener From 25d5568efedbf804a6468913a8e833bd3d514d5e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 25 Jul 2020 09:24:38 +0200 Subject: [PATCH 165/653] remove the only class method that relies on poster --- yt/utilities/minimal_representation.py | 58 +------------------------- 1 file changed, 1 insertion(+), 57 deletions(-) diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index 7834c42d706..0a6022c6b63 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -2,18 +2,12 @@ import json import os import pickle -import sys -import urllib -from tempfile import TemporaryFile from uuid import uuid4 import numpy as np -from yt.config import ytcfg from yt.funcs import compare_dicts, get_pbar, iterable from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.exceptions import YTHubRegisterError -from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5 @@ -155,57 +149,7 @@ def restore(self, storage, ds): pass def upload(self): - api_key = ytcfg.get("yt", "hub_api_key") - url = ytcfg.get("yt", "hub_url") - if api_key == "": - raise YTHubRegisterError - metadata, (final_name, chunks) = self._generate_post() - if hasattr(self, "_ds_mrep"): - self._ds_mrep.upload() - for i in metadata: - if isinstance(metadata[i], np.ndarray): - metadata[i] = metadata[i].tolist() - elif hasattr(metadata[i], "dtype"): - metadata[i] = np.asscalar(metadata[i]) - metadata["obj_type"] = self.type - if len(chunks) == 0: - chunk_info = {"chunks": []} - else: - 
chunk_info = {"final_name": final_name, "chunks": []} - for cn, cv in chunks: - chunk_info["chunks"].append((cn, cv.size * cv.itemsize)) - metadata = json.dumps(metadata) - chunk_info = json.dumps(chunk_info) - datagen, headers = multipart_encode( - {"metadata": metadata, "chunk_info": chunk_info, "api_key": api_key} - ) - request = urllib.request.Request(url, datagen, headers) - # Actually do the request, and get the response - try: - rv = urllib.request.urlopen(request).read() - except urllib.error.HTTPError as ex: - if ex.code == 401: - mylog.error("You must create an API key before uploading.") - mylog.error("https://data.yt-project.org/getting_started.html") - return - else: - raise ex - uploader_info = json.loads(rv) - new_url = url + "/handler/%s" % uploader_info["handler_uuid"] - for i, (cn, cv) in enumerate(chunks): - f = TemporaryFile() - np.save(f, cv) - f.seek(0) - pbar = UploaderBar("%s, % 2i/% 2i" % (self.type, i + 1, len(chunks))) - datagen, headers = multipart_encode({"chunk_data": f}, cb=pbar) - request = urllib.request.Request(new_url, datagen, headers) - rv = urllib.request.urlopen(request).read() - - datagen, headers = multipart_encode({"status": "FINAL"}) - request = urllib.request.Request(new_url, datagen, headers) - rv = json.loads(urllib.request.urlopen(request).read()) - mylog.info("Upload succeeded! View here: %s", rv["url"]) - return rv + raise NotImplementedError("This method hasn't been ported to python 3") def load(self, storage): return pickle.load(open(storage, "r")) From 7e7eeffcefcc7f464d792c02dafd024033ea7982 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 25 Jul 2020 10:47:21 +0200 Subject: [PATCH 166/653] cleanup rotten code --- yt/data_objects/construction_data_containers.py | 3 --- yt/data_objects/selection_data_containers.py | 3 --- yt/data_objects/static_output.py | 3 --- 3 files changed, 9 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index b75518831df..84f921b9fc4 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -494,9 +494,6 @@ def __init__( def _mrep(self): return MinimalProjectionData(self) - def hub_upload(self): - self._mrep.upload() - def deserialize(self, fields): if not ytcfg.getboolean("yt", "serialize"): return False diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index e21acffeb07..377bc27888a 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -374,9 +374,6 @@ def _generate_container_field(self, field): def _mrep(self): return MinimalSliceData(self) - def hub_upload(self): - self._mrep.upload() - def to_pw(self, fields=None, center="c", width=None, origin="center-window"): r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this object. 
diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index a89e4232739..0ad62cd35c4 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -386,9 +386,6 @@ def _mrep(self): def _skip_cache(self): return False - def hub_upload(self): - self._mrep.upload() - @classmethod def _guess_candidates(cls, base, directories, files): """ From 803513778620c5c4d056d06306d808dc2201bea4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 25 Jul 2020 10:49:19 +0200 Subject: [PATCH 167/653] remove pickler/unpickler methods in MinimalRepresentation because pickle is unsafe --- yt/utilities/minimal_representation.py | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index 0a6022c6b63..ed8c628114e 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -1,7 +1,6 @@ import abc import json import os -import pickle from uuid import uuid4 import numpy as np @@ -152,11 +151,10 @@ def upload(self): raise NotImplementedError("This method hasn't been ported to python 3") def load(self, storage): - return pickle.load(open(storage, "r")) + raise NotImplementedError("This method hasn't been ported to python 3") def dump(self, storage): - with open(storage, "w") as fh: - pickle.dump(self, fh) + raise NotImplementedError("This method hasn't been ported to python 3") class FilteredRepresentation(MinimalRepresentation): From b5f56ebd8060818667432992e5be5eef15737ff6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 28 Jul 2020 17:39:56 +0200 Subject: [PATCH 168/653] remove now unused UploaderBar class --- yt/utilities/minimal_representation.py | 16 +--------------- 1 file changed, 1 insertion(+), 15 deletions(-) diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index ed8c628114e..a993bbe7465 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -5,7 +5,7 @@ import numpy as np -from yt.funcs import compare_dicts, get_pbar, iterable +from yt.funcs import compare_dicts, iterable from yt.units.yt_array import YTArray, YTQuantity from yt.utilities.on_demand_imports import _h5py as h5 @@ -63,20 +63,6 @@ def _deserialize_from_h5(g, ds): return result -class UploaderBar: - pbar = None - - def __init__(self, my_name=""): - self.my_name = my_name - - def __call__(self, name, prog, total): - if self.pbar is None: - self.pbar = get_pbar("Uploading %s " % self.my_name, total) - self.pbar.update(prog) - if prog == total: - self.pbar.finish() - - class ContainerClass: pass From 63a031ecb43fd0ce6d0ae2a8b1d9303d7e9a0325 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Tue, 28 Jul 2020 10:59:37 -0500 Subject: [PATCH 169/653] Fix conda_env embree checks --- setupext.py | 12 +++++++----- 1 file changed, 7 insertions(+), 5 deletions(-) diff --git a/setupext.py b/setupext.py index d51b10d3713..fc5809b30b5 100644 --- a/setupext.py +++ b/setupext.py @@ -143,17 +143,13 @@ def check_for_pyembree(std_libs): embree_libs = [] embree_aliases = {} try: - fn = resource_filename("pyembree", "rtcore.pxd") + _ = resource_filename("pyembree", "rtcore.pxd") except ImportError: return embree_libs, embree_aliases embree_prefix = os.path.abspath(read_embree_location()) embree_inc_dir = os.path.join(embree_prefix, "include") embree_lib_dir = os.path.join(embree_prefix, "lib") - if in_conda_env(): - conda_basedir = 
os.path.dirname(os.path.dirname(sys.executable)) - embree_inc_dir.append(os.path.join(conda_basedir, "include")) - embree_lib_dir.append(os.path.join(conda_basedir, "lib")) if _platform == "darwin": embree_lib_name = "embree.2" @@ -164,6 +160,12 @@ def check_for_pyembree(std_libs): embree_aliases["EMBREE_LIB_DIR"] = [embree_lib_dir] embree_aliases["EMBREE_LIBS"] = std_libs + [embree_lib_name] embree_libs += ["yt/utilities/lib/embree_mesh/*.pyx"] + + if in_conda_env(): + conda_basedir = os.path.dirname(os.path.dirname(sys.executable)) + embree_aliases["EMBREE_INC_DIR"].append(os.path.join(conda_basedir, "include")) + embree_aliases["EMBREE_LIB_DIR"].append(os.path.join(conda_basedir, "lib")) + return embree_libs, embree_aliases From e6b5ccec0f431d23628a0b912cbd4c2a13b624a3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 29 Jul 2020 10:11:16 +0200 Subject: [PATCH 170/653] bump isort minor version --- .pre-commit-config.yaml | 2 +- tests/lint_requirements.txt | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 1419abcb0b9..acdc055eefe 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -4,7 +4,7 @@ - id: black language_version: python3.7 - repo: https://github.com/timothycrosley/isort - rev: '5.2.0' # keep in sync with tests/lint_requirements.txt + rev: '5.2.1' # keep in sync with tests/lint_requirements.txt hooks: - id: isort - repo: https://gitlab.com/pycqa/flake8 diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index f5f3dbae54c..aaf97225adb 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -2,6 +2,6 @@ flake8==3.8.1 # keep in sync with .pre-commit-config.yaml mccabe==0.6.1 pycodestyle==2.6.0 pyflakes==2.2.0 -isort==5.2.0 # keep in sync with .pre-commit-config.yaml +isort==5.2.1 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear From 3b4d43c9872d7b72b068343e14be68341a03b1ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 29 Jul 2020 12:50:09 +0200 Subject: [PATCH 171/653] cleanup: remove seemingly unused and deprecated save_object and load_object methods --- yt/data_objects/data_containers.py | 16 ------------ yt/data_objects/tests/test_data_containers.py | 19 -------------- yt/geometry/geometry_handler.py | 25 ------------------- 3 files changed, 60 deletions(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index d19d8b77b05..fb7e92a5574 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -1,6 +1,5 @@ import itertools import os -import shelve import uuid import weakref from collections import defaultdict @@ -509,21 +508,6 @@ def write_out(self, filename, fields=None, format="%0.16e"): field_data[:, line].tofile(fid, sep="\t", format=format) fid.write("\n") - def save_object(self, name, filename=None): - """ - Save an object. If *filename* is supplied, it will be stored in - a :mod:`shelve` file of that name. Otherwise, it will be stored via - :meth:`yt.data_objects.api.GridIndex.save_object`. - """ - if filename is not None: - ds = shelve.open(filename, protocol=-1) - if name in ds: - mylog.info("Overwriting %s in %s", name, filename) - ds[name] = self - ds.close() - else: - self.index.save_object(self, name) - def to_dataframe(self, fields): r"""Export a data object to a :class:`~pandas.DataFrame`. 
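A minimal usage sketch for the to_dataframe() method whose summary line appears
above, assuming a dataset ``ds`` is already loaded; the field names mirror the
test file that follows:

>>> sp = ds.sphere(ds.domain_center, 0.25)
>>> df = sp.to_dataframe(["density", "velocity_z"])  # returns a pandas DataFrame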
diff --git a/yt/data_objects/tests/test_data_containers.py b/yt/data_objects/tests/test_data_containers.py index 7c1461b9ab7..bcf6547df55 100644 --- a/yt/data_objects/tests/test_data_containers.py +++ b/yt/data_objects/tests/test_data_containers.py @@ -1,5 +1,4 @@ import os -import shelve import shutil import tempfile import unittest @@ -78,24 +77,6 @@ def test_write_out(self): assert_equal(keys, file_row_1) assert_array_equal(data, file_row_2) - def test_save_object(self): - ds = fake_random_ds(16) - sp = ds.sphere(ds.domain_center, 0.25) - sp.save_object("my_sphere_1", filename="test_save_obj") - obj = shelve.open("test_save_obj", protocol=-1) - loaded_sphere = obj["my_sphere_1"][1] - obj.close() - assert_array_equal(loaded_sphere.center, sp.center) - assert_equal(loaded_sphere.radius, sp.radius) - for k in loaded_sphere._key_fields: - assert_array_equal(loaded_sphere[k], sp[k]) - - # Object is saved but retrieval is not working - # sp.save_object("my_sphere_2") - # loaded_sphere = ds.index.load_object("my_sphere_2") - # for k in loaded_sphere._key_fields: - # assert_array_equal(loaded_sphere[k], sp[k]) - @requires_module("pandas") def test_to_dataframe(self): fields = ["density", "velocity_z"] diff --git a/yt/geometry/geometry_handler.py b/yt/geometry/geometry_handler.py index ae7b0f0534d..a37954a419d 100644 --- a/yt/geometry/geometry_handler.py +++ b/yt/geometry/geometry_handler.py @@ -1,12 +1,10 @@ import abc import os -import pickle import weakref import numpy as np from yt.config import ytcfg -from yt.funcs import iterable from yt.units.yt_array import YTArray, uconcatenate from yt.utilities.exceptions import YTFieldNotFound from yt.utilities.io_handler import io_registry @@ -147,29 +145,6 @@ def _reload_data_file(self, *args, **kwargs): del self._data_file self._data_file = h5py.File(self.__data_filename, self._data_mode) - def save_object(self, obj, name): - """ - Save an object (*obj*) to the data_file using the Pickle protocol, - under the name *name* on the node /Objects. - """ - s = pickle.dumps(obj, protocol=-1) - self.save_data(np.array(s, dtype="c"), "/Objects", name, force=True) - - def load_object(self, name): - """ - Load and return and object from the data_file using the Pickle protocol, - under the name *name* on the node /Objects. 
-        """
-        obj = self.get_data("/Objects", name)
-        if obj is None:
-            return
-        obj = pickle.loads(obj.value)
-        if iterable(obj) and len(obj) == 2:
-            obj = obj[1]  # Just the object, not the ds
-        if hasattr(obj, "_fix_pickle"):
-            obj._fix_pickle()
-        return obj
-
     def get_data(self, node, name):
         """
         Return the dataset with a given *name* located at *node* in the

From f4339608c41cbb0c1ebc3696387378d353c349b7 Mon Sep 17 00:00:00 2001
From: =?UTF-8?q?Cl=C3=A9ment=20Robert?=
Date: Wed, 29 Jul 2020 15:38:05 +0200
Subject: [PATCH 172/653] remove an unused test file

---
 yt/data_objects/tests/test_pickle.py | 48 ----------------------------
 1 file changed, 48 deletions(-)
 delete mode 100644 yt/data_objects/tests/test_pickle.py

diff --git a/yt/data_objects/tests/test_pickle.py b/yt/data_objects/tests/test_pickle.py
deleted file mode 100644
index 16afb91839f..00000000000
--- a/yt/data_objects/tests/test_pickle.py
+++ /dev/null
@@ -1,48 +0,0 @@
-import os
-import pickle
-import tempfile
-
-from yt.testing import assert_equal, fake_random_ds
-
-
-def setup():
-    """Test specific setup."""
-    from yt.config import ytcfg
-
-    ytcfg["yt", "__withintesting"] = "True"
-
-
-def test_save_load_pickle():
-    """Main test for loading pickled objects"""
-    return  # Until boolean regions are implemented we can't test this
-    test_ds = fake_random_ds(64)
-
-    # create extracted region from boolean (fairly complex object)
-    center = (test_ds.domain_left_edge + test_ds.domain_right_edge) / 2
-    sp_outer = test_ds.sphere(center, test_ds.domain_width[0])
-    sp_inner = test_ds.sphere(center, test_ds.domain_width[0] / 10.0)
-    sp_boolean = test_ds.boolean([sp_outer, "NOT", sp_inner])
-
-    minv, maxv = sp_boolean.quantities["Extrema"]("density")[0]
-    contour_threshold = min(minv * 10.0, 0.9 * maxv)
-
-    contours = sp_boolean.extract_connected_sets(
-        "density", 1, contour_threshold, maxv + 1, log_space=True, cache=True
-    )
-
-    # save object
-    cpklfile = tempfile.NamedTemporaryFile(delete=False)
-    pickle.dump(contours[1][0], cpklfile)
-    cpklfile.close()
-
-    # load object
-    test_load = pickle.load(open(cpklfile.name, "rb"))
-
-    assert_equal.description = "%s: File was pickle-loaded successfully" % __name__
-    assert_equal(test_load is not None, True)
-    assert_equal.description = (
-        "%s: Length of pickle-loaded connected set object" % __name__
-    )
-    assert_equal(len(contours[1][0]), len(test_load))
-
-    os.remove(cpklfile.name)

From e9e07b14a7c82c9eb84e7cb47c16243df3cf6819 Mon Sep 17 00:00:00 2001
From: "Kacper Kowalik (Xarthisius)"
Date: Wed, 29 Jul 2020 08:40:59 -0500
Subject: [PATCH 173/653] Fix a call to a non-existent plotting routine

---
 doc/source/visualizing/callbacks.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/doc/source/visualizing/callbacks.rst b/doc/source/visualizing/callbacks.rst
index 78571efa27b..b7ccace4004 100644
--- a/doc/source/visualizing/callbacks.rst
+++ b/doc/source/visualizing/callbacks.rst
@@ -94,7 +94,7 @@ dataset from AMRVAC :
    import yt

    ds = yt.load("amrvac/bw_polar_2D0000.dat")
-   s = yt.plot2d(ds, 'density')
+   s = yt.plot_2d(ds, "density")
    s.set_background_color("density", "black")

    # Plot marker and text in data coords

From f87fd354d00a722701f1295aced262e230523b93 Mon Sep 17 00:00:00 2001
From: "Kacper Kowalik (Xarthisius)"
Date: Wed, 29 Jul 2020 08:43:38 -0500
Subject: [PATCH 174/653] Cast LineBuffer start/stop point to float.

Fixes #2794 --- yt/visualization/line_plot.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/line_plot.py b/yt/visualization/line_plot.py index bc9cc7bf308..47d50ffacba 100644 --- a/yt/visualization/line_plot.py +++ b/yt/visualization/line_plot.py @@ -434,7 +434,7 @@ def _validate_point(point, ds, start=False): if not iterable(point): raise RuntimeError("Input point must be array-like") if not isinstance(point, YTArray): - point = ds.arr(point, "code_length") + point = ds.arr(point, "code_length", dtype=np.float64) if len(point.shape) != 1: raise RuntimeError("Input point must be a 1D array") if point.shape[0] < ds.dimensionality: From 152a1ad26b6c6468576b909cb6eefaf6f20d0d4d Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Wed, 29 Jul 2020 08:44:44 -0500 Subject: [PATCH 175/653] Accessing mpl.cm.cmap_d is deprecated since mpl-3.3.0 --- yt/visualization/color_maps.py | 14 ++++++++++---- yt/visualization/eps_writer.py | 2 +- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/yt/visualization/color_maps.py b/yt/visualization/color_maps.py index 9748d307890..aaa70508477 100644 --- a/yt/visualization/color_maps.py +++ b/yt/visualization/color_maps.py @@ -508,7 +508,11 @@ def add_colormap(name, cdict): # Add colormaps in _colormap_data.py that weren't defined here _vs = np.linspace(0, 1, 256) for k, v in list(_cm.color_map_luts.items()): - if k not in yt_colormaps and k not in mcm.cmap_d: + try: + colormaps = mcm._cmap_registry + except AttributeError: # mpl < 3.3.0 + colormaps = mcm.cmap_d + if k not in yt_colormaps and k not in colormaps: cdict = { "red": np.transpose([_vs, v[0], v[0]]), "green": np.transpose([_vs, v[1], v[1]]), @@ -565,12 +569,14 @@ def show_colormaps(subset="all", filename=None): If filename is set, then it will save the colormaps to an output file. If it is not set, it will "show" the result interactively. 
""" - from matplotlib import cm as cm, pyplot as plt + from matplotlib import pyplot as plt a = np.outer(np.arange(0, 1, 0.01), np.ones(10)) if subset == "all": maps = [ - m for m in cm.cmap_d if (not m.startswith("idl")) & (not m.endswith("_r")) + m + for m in plt.colormaps() + if (not m.startswith("idl")) & (not m.endswith("_r")) ] elif subset == "yt_native": maps = [ @@ -580,7 +586,7 @@ def show_colormaps(subset="all", filename=None): ] else: try: - maps = [m for m in cm.cmap_d if m in subset] + maps = [m for m in plt.colormaps() if m in subset] if len(maps) == 0: raise AttributeError except AttributeError: diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index 40dfac05320..251c90efaba 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -744,7 +744,7 @@ def colorbar( # Convert the colormap into a string x = np.linspace(1, 0, 256) - cm_string = cm.cmap_d[name](x, bytes=True)[:, 0:3].tostring() + cm_string = cm.get_cmap[name](x, bytes=True)[:, 0:3].tostring() cmap_im = pyx.bitmap.image(imsize[0], imsize[1], "RGB", cm_string) if orientation == "top" or orientation == "bottom": From 740c743a173397b15b9b52fdfe2d383d75fb849f Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Wed, 29 Jul 2020 11:30:15 -0500 Subject: [PATCH 176/653] update to new workflow repo location --- .github/workflows/slash-command-dispatch.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml index d6e4d34420c..314d6b782fc 100644 --- a/.github/workflows/slash-command-dispatch.yml +++ b/.github/workflows/slash-command-dispatch.yml @@ -14,4 +14,4 @@ jobs: help format rebase - repository: matthewturk/slash-command-processor + repository: yt-project/slash-command-processor From 38d93c6dac05d1caf51492201d88616a638f9213 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Wed, 29 Jul 2020 13:14:16 -0500 Subject: [PATCH 177/653] Re-add isort and black --- .github/workflows/slash-command-dispatch.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml index 314d6b782fc..73b71c17ef2 100644 --- a/.github/workflows/slash-command-dispatch.yml +++ b/.github/workflows/slash-command-dispatch.yml @@ -14,4 +14,6 @@ jobs: help format rebase + isort + black repository: yt-project/slash-command-processor From 384a8380ef436f97adb462c5ae4abda00030a9f8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 31 Jul 2020 11:51:11 +0200 Subject: [PATCH 178/653] add support for gradient computation of several fields at a at time, and allow passing them with fname only --- yt/data_objects/static_output.py | 82 ++++++++++++++++++++------------ yt/fields/tests/test_fields.py | 32 +++++++++++++ 2 files changed, 83 insertions(+), 31 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 0ad62cd35c4..12b0b196586 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1565,54 +1565,74 @@ def add_smoothed_particle_field( """ issue_deprecation_warning("This method is deprecated. " + DEP_MSG_SMOOTH_FIELD) - def add_gradient_fields(self, input_field): + def add_gradient_fields(self, fields=None, input_field=None): """Add gradient fields. - Creates four new grid-based fields that represent the components of - the gradient of an existing field, plus an extra field for the magnitude - of the gradient. 
Currently only supported in Cartesian geometries. The + Creates four new grid-based fields that represent the components of the gradient + of an existing field, plus an extra field for the magnitude of the gradient. The gradient is computed using second-order centered differences. Parameters ---------- - input_field : tuple - The field name tuple of the particle field the deposited field will - be created from. This must be a field name tuple so yt can - appropriately infer the correct field type. + fields : str or tuple(str, str), or a list of the previous + Label(s) for at least one field. Can either represent a tuple + (, ) or simply the field name. + Warning: several field types may match the provided field name, + in which case the first one discovered internally is used. Returns ------- A list of field name tuples for the newly created fields. + Raises + ------ + YTFieldNotParsable + If fields are not parsable to yt field keys. + + YTFieldNotFound : + If at least one field can not be identified. + Examples -------- - >>> grad_fields = ds.add_gradient_fields(("gas","temperature")) - >>> print(grad_fields) - [('gas', 'temperature_gradient_x'), - ('gas', 'temperature_gradient_y'), - ('gas', 'temperature_gradient_z'), - ('gas', 'temperature_gradient_magnitude')] + >>> grad_fields = ds.add_gradient_fields(("gas","temperature")) print(grad_fields) + >>> [('gas', 'temperature_gradient_x'), ('gas', 'temperature_gradient_y'), ('gas', + >>> 'temperature_gradient_z'), ('gas', 'temperature_gradient_magnitude')] - Note that the above example assumes ds.geometry == 'cartesian'. In general, the function - will create gradients components along the axes of the dataset coordinate system. - For instance, with cylindrical data, one gets 'temperature_gradient_' + Note that the above example assumes ds.geometry == 'cartesian'. In general, the + function will create gradients components along the axes of the dataset coordinate + system. For instance, with cylindrical data, one gets + 'temperature_gradient_' """ + if input_field is not None: + issue_deprecation_warning( + "keyword argument 'input_field' is deprecated in favor of 'fields' " + "and will be removed in a future version of yt." 
+ ) + if fields is not None: + raise TypeError( + "Can not use both 'fields' and 'input_field' keyword arguments" + ) + fields = input_field + if fields is None: + raise TypeError("Missing required positional argument: fields") + self.index - if not isinstance(input_field, tuple): - raise TypeError - ftype, input_field = input_field[0], input_field[1] - units = self.field_info[ftype, input_field].units - setup_gradient_fields(self.field_info, (ftype, input_field), units) - # Now we make a list of the fields that were just made, to check them - # and to return them - grad_fields = [ - (ftype, input_field + "_gradient_%s" % suffix) - for suffix in self.coordinates.axis_order - ] - grad_fields.append((ftype, input_field + "_gradient_magnitude")) - deps, _ = self.field_info.check_derived_fields(grad_fields) - self.field_dependencies.update(deps) + data_obj = self.all_data() + explicit_fields = data_obj._determine_fields(fields) + grad_fields = [] + for ftype, fname in explicit_fields: + units = self.field_info[ftype, fname].units + setup_gradient_fields(self.field_info, (ftype, fname), units) + # Now we make a list of the fields that were just made, to check them + # and to return them + grad_fields += [ + (ftype, "{}_gradient_{}".format(fname, suffix)) + for suffix in self.coordinates.axis_order + ] + grad_fields.append((ftype, "{}_gradient_magnitude".format(fname))) + deps, _ = self.field_info.check_derived_fields(grad_fields) + self.field_dependencies.update(deps) return grad_fields _max_level = None diff --git a/yt/fields/tests/test_fields.py b/yt/fields/tests/test_fields.py index 49280db0e86..b8aee0cec20 100644 --- a/yt/fields/tests/test_fields.py +++ b/yt/fields/tests/test_fields.py @@ -238,6 +238,38 @@ def test_add_gradient_fields(): assert str(ret.units) == "1/cm" +def test_add_gradient_fields_by_fname(): + ds = fake_amr_ds(fields=("density", "temperature")) + actual = ds.add_gradient_fields("density") + expected = [ + ("gas", "density_gradient_x"), + ("gas", "density_gradient_y"), + ("gas", "density_gradient_z"), + ("gas", "density_gradient_magnitude"), + ] + assert_equal(actual, expected) + + +def test_add_gradient_multiple_fields(): + ds = fake_amr_ds(fields=("density", "temperature")) + actual = ds.add_gradient_fields([("gas", "density"), ("gas", "temperature")]) + expected = [ + ("gas", "density_gradient_x"), + ("gas", "density_gradient_y"), + ("gas", "density_gradient_z"), + ("gas", "density_gradient_magnitude"), + ("gas", "temperature_gradient_x"), + ("gas", "temperature_gradient_y"), + ("gas", "temperature_gradient_z"), + ("gas", "temperature_gradient_magnitude"), + ] + assert_equal(actual, expected) + + ds = fake_amr_ds(fields=("density", "temperature")) + actual = ds.add_gradient_fields(["density", "temperature"]) + assert_equal(actual, expected) + + def test_add_gradient_fields_curvilinear(): ds = fake_amr_ds(fields=["density"], geometry="spherical") gfields = ds.add_gradient_fields(("gas", "density")) From 38fd394650ec8a45ed4e853105191361c5fdd074 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 31 Jul 2020 13:56:54 -0500 Subject: [PATCH 179/653] This fixes a non-fatal error in the install script. Since we no longer set the INST_PY3 variable, we don't need to check it. 
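For context, a minimal stand-alone sketch of the failure mode being removed
(hypothetical shell, not the script's real `get_willwont` definition): with
INST_PY3 never defined, the removed lines expand it to an empty string, the
numeric test inside the helper emits a bash warning such as
"[: -eq: unary operator expected", and the script keeps running, hence
"non-fatal".

    # hypothetical reproduction of the removed pattern; INST_PY3 is left unset
    function get_willwont {
        if [ $1 -eq 1 ]; then
            echo -n "will "
        else
            echo -n "won't "
        fi
    }
    printf "%-18s = %s so I " "INST_PY3" "${INST_PY3}"
    get_willwont ${INST_PY3}
    echo "be installing Python 3"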
--- doc/install_script.sh | 5 ----- 1 file changed, 5 deletions(-) diff --git a/doc/install_script.sh b/doc/install_script.sh index ca1a7cd4857..62d165a3635 100644 --- a/doc/install_script.sh +++ b/doc/install_script.sh @@ -129,7 +129,6 @@ function write_config echo INST_YT_SOURCE=${INST_YT_SOURCE} > ${CONFIG_FILE} echo INST_GIT=${INST_GIT} >> ${CONFIG_FILE} echo INST_PYX=${INST_PYX} >> ${CONFIG_FILE} - echo INST_PY3=${INST_PY3} >> ${CONFIG_FILE} echo INST_SCIPY=${INST_SCIPY} >> ${CONFIG_FILE} echo INST_EMBREE=${INST_EMBREE} >> ${CONFIG_FILE} echo INST_H5PY=${INST_H5PY} >> ${CONFIG_FILE} @@ -299,10 +298,6 @@ printf "%-18s = %s so I " "INST_YT_SOURCE" "${INST_YT_SOURCE}" get_willwont ${INST_YT_SOURCE} echo "be compiling yt from source" -printf "%-18s = %s so I " "INST_PY3" "${INST_PY3}" -get_willwont ${INST_PY3} -echo "be installing Python 3" - printf "%-18s = %s so I " "INST_GIT" "${INST_GIT}" get_willwont ${INST_GIT} echo "be installing git" From f83cff0880e40623f992c7156f9f81f93ce72cdd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 31 Jul 2020 16:50:40 +0200 Subject: [PATCH 180/653] bugfix: remove B007 from flake8 ignore list and fix all errors detected --- setup.cfg | 1 - .../construction_data_containers.py | 4 +- yt/data_objects/data_containers.py | 24 ++++--- yt/data_objects/level_sets/clump_handling.py | 2 +- yt/data_objects/particle_trajectories.py | 6 +- yt/data_objects/profiles.py | 6 +- yt/data_objects/tests/test_ortho_rays.py | 2 +- yt/data_objects/tests/test_slice.py | 62 +++++++++---------- .../tests/test_sph_data_objects.py | 2 +- yt/fields/tests/test_fields.py | 6 +- yt/frontends/adaptahop/io.py | 4 +- yt/frontends/amrvac/datfile_utils.py | 2 +- yt/frontends/art/data_structures.py | 4 +- yt/frontends/art/io.py | 14 ++--- yt/frontends/artio/data_structures.py | 2 +- yt/frontends/artio/io.py | 2 +- yt/frontends/athena/data_structures.py | 4 +- yt/frontends/athena_pp/data_structures.py | 5 +- yt/frontends/athena_pp/io.py | 2 +- yt/frontends/boxlib/data_structures.py | 8 +-- yt/frontends/chombo/data_structures.py | 6 +- yt/frontends/enzo/io.py | 2 +- yt/frontends/enzo_p/data_structures.py | 6 +- yt/frontends/enzo_p/tests/test_misc.py | 6 +- yt/frontends/fits/misc.py | 2 +- yt/frontends/flash/io.py | 4 +- yt/frontends/gadget/io.py | 2 +- yt/frontends/gadget_fof/io.py | 2 +- yt/frontends/gamer/io.py | 4 +- yt/frontends/gdf/data_structures.py | 2 +- yt/frontends/moab/tests/test_c5.py | 4 +- yt/frontends/open_pmd/data_structures.py | 2 +- yt/frontends/owls_subfind/io.py | 2 +- yt/frontends/ramses/data_structures.py | 8 +-- yt/frontends/ramses/field_handlers.py | 8 +-- yt/frontends/ramses/tests/test_outputs.py | 2 +- yt/frontends/sdf/io.py | 10 +-- yt/frontends/stream/data_structures.py | 4 +- yt/frontends/stream/io.py | 4 +- yt/frontends/swift/io.py | 2 +- yt/frontends/tipsy/io.py | 10 ++- yt/frontends/ytdata/io.py | 4 +- yt/geometry/geometry_handler.py | 2 +- yt/geometry/grid_geometry_handler.py | 2 +- yt/geometry/tests/test_particle_octree.py | 4 +- yt/geometry/unstructured_mesh_handler.py | 2 +- yt/pmods.py | 2 +- yt/testing.py | 2 +- yt/utilities/answer_testing/framework.py | 4 +- yt/utilities/flagging_methods.py | 2 +- yt/utilities/fortran_utils.py | 8 +-- yt/utilities/lib/tests/test_geometry_utils.py | 2 +- yt/utilities/nodal_data_utils.py | 6 +- .../parallel_analysis_interface.py | 8 +-- yt/utilities/tests/test_selectors.py | 2 +- yt/visualization/eps_writer.py | 8 +-- yt/visualization/plot_container.py | 4 +- 
yt/visualization/plot_modifications.py | 2 +- yt/visualization/profile_plotter.py | 8 +-- yt/visualization/tests/test_particle_plot.py | 2 +- yt/visualization/volume_rendering/UBVRI.py | 2 +- .../volume_rendering/interactive_loop.py | 8 +-- .../volume_rendering/off_axis_projection.py | 2 +- .../volume_rendering/old_camera.py | 10 +-- .../volume_rendering/render_source.py | 2 +- yt/visualization/volume_rendering/scene.py | 8 +-- .../volume_rendering/tests/test_varia.py | 2 +- .../tests/test_vr_orientation.py | 6 +- 68 files changed, 178 insertions(+), 190 deletions(-) diff --git a/setup.cfg b/setup.cfg index 162c89398e1..ac482fd533c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -37,7 +37,6 @@ ignore = E203, # Whitespace before ':' (black compatibility) W503, # Line break occurred before a binary operator (black compatibility) W605, # Invalid escape sequence 'x' B005, # "Using .strip() with multi-character strings is misleading the reader." - B007, # "Loop control variable not used within the loop body." TODO: handle B302, # this is a python 3 compatibility warning, not relevant since don't support python 2 anymore jobs=8 diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 84f921b9fc4..e09e3aff35c 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -1542,7 +1542,7 @@ def get_data(self, fields=None, sample_type="face", no_ghost=False): mylog.info("Extracting (sampling: %s)" % (fields,)) verts = [] samples = [] - for io_chunk in parallel_objects(self.data_source.chunks([], "io")): + for _io_chunk in parallel_objects(self.data_source.chunks([], "io")): for block, mask in self.data_source.blocks: my_verts = self._extract_isocontours_from_grid( block, @@ -1652,7 +1652,7 @@ def calculate_flux(self, field_x, field_y, field_z, fluxing_field=None): """ flux = 0.0 mylog.info("Fluxing %s", fluxing_field) - for io_chunk in parallel_objects(self.data_source.chunks([], "io")): + for _io_chunk in parallel_objects(self.data_source.chunks([], "io")): for block, mask in self.data_source.blocks: flux += self._calculate_flux_in_grid( block, mask, field_x, field_y, field_z, fluxing_field diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 0ea775ff514..c8e0e515fc5 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -342,10 +342,8 @@ def _generate_spatial_fluid(self, field, ngz): if ngz == 0: deps = self._identify_dependencies([field], spatial=True) deps = self._determine_fields(deps) - for io_chunk in self.chunks([], "io", cache=False): - for i, chunk in enumerate( - self.chunks([], "spatial", ngz=0, preload_fields=deps) - ): + for _io_chunk in self.chunks([], "io", cache=False): + for _chunk in self.chunks([], "spatial", ngz=0, preload_fields=deps): o = self._current_chunk.objs[0] if accumulate: rv = self.ds.arr(np.empty(o.ires.size, dtype="float64"), units) @@ -357,7 +355,7 @@ def _generate_spatial_fluid(self, field, ngz): ) else: chunks = self.index._chunk(self, "spatial", ngz=ngz) - for i, chunk in enumerate(chunks): + for chunk in chunks: with self._chunked_read(chunk): gz = self._current_chunk.objs[0] gz.field_parameters = self.field_parameters @@ -393,8 +391,8 @@ def _generate_particle_field(self, field): size = self._count_particles(ftype) rv = self.ds.arr(np.empty(size, dtype="float64"), finfo.units) ind = 0 - for io_chunk in self.chunks([], "io", cache=False): - for i, chunk in 
enumerate(self.chunks(field, "spatial")): + for _io_chunk in self.chunks([], "io", cache=False): + for _chunk in self.chunks(field, "spatial"): x, y, z = (self[ftype, "particle_position_%s" % ax] for ax in "xyz") if x.size == 0: continue @@ -413,12 +411,12 @@ def _generate_particle_field(self, field): return rv def _count_particles(self, ftype): - for (f1, f2), val in self.field_data.items(): + for (f1, _f2), val in self.field_data.items(): if f1 == ftype: return val.size size = 0 - for io_chunk in self.chunks([], "io", cache=False): - for i, chunk in enumerate(self.chunks([], "spatial")): + for _io_chunk in self.chunks([], "io", cache=False): + for _chunk in self.chunks([], "spatial"): x, y, z = (self[ftype, "particle_position_%s" % ax] for ax in "xyz") if x.size == 0: continue @@ -1518,8 +1516,8 @@ def tiles(self): @property def blocks(self): - for io_chunk in self.chunks([], "io"): - for i, chunk in enumerate(self.chunks([], "spatial", ngz=0)): + for _io_chunk in self.chunks([], "io"): + for _chunk in self.chunks([], "spatial", ngz=0): # For grids this will be a grid object, and for octrees it will # be an OctreeSubset. Note that we delegate to the sub-object. o = self._current_chunk.objs[0] @@ -2871,7 +2869,7 @@ def extract_connected_sets( nj, cids = identify_contours(self, field, cons[level], mv) unique_contours = set([]) for sl_list in cids.values(): - for sl, ff in sl_list: + for _sl, ff in sl_list: unique_contours.update(np.unique(ff)) contour_key = uuid.uuid4().hex # In case we're a cut region already... diff --git a/yt/data_objects/level_sets/clump_handling.py b/yt/data_objects/level_sets/clump_handling.py index cfc09356ccd..994cdd4ac4a 100644 --- a/yt/data_objects/level_sets/clump_handling.py +++ b/yt/data_objects/level_sets/clump_handling.py @@ -155,7 +155,7 @@ def find_children(self, min_val, max_val=None): # the unique values of the contours by examining the list here. 
unique_contours = set([]) for sl_list in cids.values(): - for sl, ff in sl_list: + for _sl, ff in sl_list: unique_contours.update(np.unique(ff)) contour_key = uuid.uuid4().hex base_object = getattr(self.data, "base_object", self.data) diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 04a0a9a9621..5a2db29cb6a 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -111,7 +111,7 @@ def __init__( mylog.setLevel(old_level) times = [] - for fn, (time, indices, pfields) in sorted(my_storage.items()): + for _fn, (time, _indices, _pfields) in sorted(my_storage.items()): times.append(time) self.times = self.data_series[0].arr([time for time in times], times[0].units) @@ -119,7 +119,7 @@ def __init__( output_field = np.empty((self.num_indices, self.num_steps)) output_field.fill(np.nan) for field in ("particle_position_%s" % ax for ax in "xyz"): - for i, (fn, (time, indices, pfields)) in enumerate( + for i, (_fn, (_time, indices, pfields)) in enumerate( sorted(my_storage.items()) ): try: @@ -291,7 +291,7 @@ def _get_data(self, fields): output_field.fill(np.nan) for field in missing_fields: fd = fds[field] - for i, (fn, (indices, pfield)) in enumerate(sorted(my_storage.items())): + for i, (_fn, (indices, pfield)) in enumerate(sorted(my_storage.items())): output_field[indices, i] = pfield[field] self.field_data[field] = array_like_field(dd_first, output_field.copy(), fd) diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index 199c0cd7a30..e15497b73f8 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -143,7 +143,7 @@ def _finalize_storage(self, fields, temp_storage): # We use our main comm here # This also will fill _field_data - for i, field in enumerate(fields): + for i, _field in enumerate(fields): # q values are returned as q * weight but we want just q temp_storage.qvalues[..., i][ temp_storage.used @@ -173,7 +173,7 @@ def _finalize_storage(self, fields, temp_storage): all_weight[all_store[p].used] += all_store[p].weight_values[ all_store[p].used ] - for i, field in enumerate(fields): + for i, _field in enumerate(fields): all_val[..., i][all_store[p].used] += all_store[p].values[..., i][ all_store[p].used ] @@ -938,7 +938,7 @@ def _bin_chunk(self, chunk, fields, storage): elif self.deposition == "cic": func = CICDeposit_2 - for fi, field in enumerate(fields): + for fi, _field in enumerate(fields): if self.weight_field is None: deposit_vals = fdata[:, fi] else: diff --git a/yt/data_objects/tests/test_ortho_rays.py b/yt/data_objects/tests/test_ortho_rays.py index b179761594b..5ba000ed3be 100644 --- a/yt/data_objects/tests/test_ortho_rays.py +++ b/yt/data_objects/tests/test_ortho_rays.py @@ -8,7 +8,7 @@ def test_ortho_ray(): dx = (ds.domain_right_edge - ds.domain_left_edge) / ds.domain_dimensions axes = ["x", "y", "z"] - for ax, an in enumerate(axes): + for ax, _an in enumerate(axes): ocoord = ds.arr(np.random.random(2), "code_length") my_oray = ds.ortho_ray(ax, ocoord) diff --git a/yt/data_objects/tests/test_slice.py b/yt/data_objects/tests/test_slice.py index 4b0711d25ae..a3f8b63a36c 100644 --- a/yt/data_objects/tests/test_slice.py +++ b/yt/data_objects/tests/test_slice.py @@ -39,40 +39,38 @@ def test_slice(pf): uc = [np.unique(c) for c in coords] slc_pos = 0.5 # Some simple slice tests with single grids - for ax, an in enumerate("xyz"): + for ax in range(3): xax = ds.coordinates.x_axis[ax] yax = ds.coordinates.y_axis[ax] - for wf in ["density", None]: 
- slc = ds.slice(ax, slc_pos) - shifted_slc = ds.slice(ax, slc_pos + grid_eps) - assert_equal(slc["ones"].sum(), slc["ones"].size) - assert_equal(slc["ones"].min(), 1.0) - assert_equal(slc["ones"].max(), 1.0) - assert_equal(np.unique(slc["px"]), uc[xax]) - assert_equal(np.unique(slc["py"]), uc[yax]) - assert_equal(np.unique(slc["pdx"]), 0.5 / dims[xax]) - assert_equal(np.unique(slc["pdy"]), 0.5 / dims[yax]) - pw = slc.to_pw(fields="density") - for p in pw.plots.values(): - tmpfd, tmpname = tempfile.mkstemp(suffix=".png") - os.close(tmpfd) - p.save(name=tmpname) - fns.append(tmpname) - for width in [(1.0, "unitary"), 1.0, ds.quan(0.5, "code_length")]: - frb = slc.to_frb((1.0, "unitary"), 64) - shifted_frb = shifted_slc.to_frb((1.0, "unitary"), 64) - for slc_field in ["ones", "density"]: - fi = ds._get_field_info(slc_field) - assert_equal(frb[slc_field].info["data_source"], slc.__str__()) - assert_equal(frb[slc_field].info["axis"], ax) - assert_equal(frb[slc_field].info["field"], slc_field) - assert_equal(frb[slc_field].units, Unit(fi.units)) - assert_equal(frb[slc_field].info["xlim"], frb.bounds[:2]) - assert_equal(frb[slc_field].info["ylim"], frb.bounds[2:]) - assert_equal(frb[slc_field].info["center"], slc.center) - assert_equal(frb[slc_field].info["coord"], slc_pos) - assert_equal(frb[slc_field], shifted_frb[slc_field]) - assert_equal(wf, None) + slc = ds.slice(ax, slc_pos) + shifted_slc = ds.slice(ax, slc_pos + grid_eps) + assert_equal(slc["ones"].sum(), slc["ones"].size) + assert_equal(slc["ones"].min(), 1.0) + assert_equal(slc["ones"].max(), 1.0) + assert_equal(np.unique(slc["px"]), uc[xax]) + assert_equal(np.unique(slc["py"]), uc[yax]) + assert_equal(np.unique(slc["pdx"]), 0.5 / dims[xax]) + assert_equal(np.unique(slc["pdy"]), 0.5 / dims[yax]) + pw = slc.to_pw(fields="density") + for p in pw.plots.values(): + tmpfd, tmpname = tempfile.mkstemp(suffix=".png") + os.close(tmpfd) + p.save(name=tmpname) + fns.append(tmpname) + for width in [(1.0, "unitary"), 1.0, ds.quan(0.5, "code_length")]: + frb = slc.to_frb(width, 64) + shifted_frb = shifted_slc.to_frb(width, 64) + for slc_field in ["ones", "density"]: + fi = ds._get_field_info(slc_field) + assert_equal(frb[slc_field].info["data_source"], slc.__str__()) + assert_equal(frb[slc_field].info["axis"], ax) + assert_equal(frb[slc_field].info["field"], slc_field) + assert_equal(frb[slc_field].units, Unit(fi.units)) + assert_equal(frb[slc_field].info["xlim"], frb.bounds[:2]) + assert_equal(frb[slc_field].info["ylim"], frb.bounds[2:]) + assert_equal(frb[slc_field].info["center"], slc.center) + assert_equal(frb[slc_field].info["coord"], slc_pos) + assert_equal(frb[slc_field], shifted_frb[slc_field]) teardown_func(fns) diff --git a/yt/data_objects/tests/test_sph_data_objects.py b/yt/data_objects/tests/test_sph_data_objects.py index e340a3190d4..1885bb5af4f 100644 --- a/yt/data_objects/tests/test_sph_data_objects.py +++ b/yt/data_objects/tests/test_sph_data_objects.py @@ -170,7 +170,7 @@ def test_ray(): def test_cutting(): ds = fake_sph_orientation_ds() for (normal, center), answer in CUTTING_ANSWERS.items(): - for i in range(-1, 2): + for _ in range(3): cen = [c + 0.1 * c for c in center] cut = ds.cutting(normal, cen) assert_equal(cut["gas", "density"].shape[0], answer) diff --git a/yt/fields/tests/test_fields.py b/yt/fields/tests/test_fields.py index 49280db0e86..11f56c9be72 100644 --- a/yt/fields/tests/test_fields.py +++ b/yt/fields/tests/test_fields.py @@ -116,13 +116,13 @@ def __call__(self): def get_base_ds(nprocs): fields, units = [], [] - 
for fname, (code_units, aliases, dn) in StreamFieldInfo.known_other_fields: + for fname, (code_units, _aliases, _dn) in StreamFieldInfo.known_other_fields: fields.append(("gas", fname)) units.append(code_units) pfields, punits = [], [] - for fname, (code_units, aliases, dn) in StreamFieldInfo.known_particle_fields: + for fname, (code_units, _aliases, _dn) in StreamFieldInfo.known_particle_fields: if fname == "smoothing_lenth": # we test SPH fields elsewhere continue @@ -432,7 +432,7 @@ def test_field_inference(): @requires_file(ISOGAL) def test_deposit_amr(): ds = load(ISOGAL) - for i, g in enumerate(ds.index.grids): + for g in ds.index.grids: gpm = g["particle_mass"].sum() dpm = g["deposit", "all_mass"].sum() assert_allclose_units(gpm, dpm) diff --git a/yt/frontends/adaptahop/io.py b/yt/frontends/adaptahop/io.py index d253b989608..eccdfc0b19a 100644 --- a/yt/frontends/adaptahop/io.py +++ b/yt/frontends/adaptahop/io.py @@ -67,7 +67,7 @@ def _read_particle_fields(self, chunks, ptf, selector): data_files.update(obj.data_files) def iterate_over_attributes(attr_list): - for attr, length, dtype in attr_list: + for attr, _length, _dtype in attr_list: if isinstance(attr, tuple): for a in attr: yield a @@ -273,7 +273,7 @@ def _todo_from_attributes(attributes): def _find_attr_position(key): j = 0 - for attrs, l, k in HALO_ATTRIBUTES: + for attrs, _l, _k in HALO_ATTRIBUTES: if not isinstance(attrs, tuple): attrs = (attrs,) for a in attrs: diff --git a/yt/frontends/amrvac/datfile_utils.py b/yt/frontends/amrvac/datfile_utils.py index af9be8c65e3..e0c3fda3511 100644 --- a/yt/frontends/amrvac/datfile_utils.py +++ b/yt/frontends/amrvac/datfile_utils.py @@ -70,7 +70,7 @@ def get_header(istream): # Read w_names w_names = [] - for i in range(h["nw"]): + for _ in range(h["nw"]): fmt = ALIGN + NAME_LEN * "c" hdr = struct.unpack(fmt, istream.read(struct.calcsize(fmt))) w_names.append(b"".join(hdr).strip().decode()) diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index df3651aab86..cfae85145ec 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -112,7 +112,7 @@ def _chunk_all(self, dobj): def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None): sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info) - for i, og in enumerate(sobjs): + for og in sobjs: if ngz > 0: g = og.retrieve_ghost_zones(ngz, [], smoothed=True) else: @@ -254,7 +254,7 @@ def _parse_parameter_file(self): # read the amr header with open(self._file_amr, "rb") as f: amr_header_vals = fpu.read_attrs(f, amr_header_struct, ">") - for to_skip in ["tl", "dtl", "tlold", "dtlold", "iSO"]: + for _to_skip in ["tl", "dtl", "tlold", "dtlold", "iSO"]: fpu.skip(f, endian=">") (self.ncell) = fpu.read_vector(f, "i", ">")[0] # Try to figure out the root grid dimensions diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index 85909567e88..c66cf3dc0b2 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -81,8 +81,8 @@ def _get_mask(self, selector, ftype): def _read_particle_coords(self, chunks, ptf): chunks = list(chunks) - for chunk in chunks: - for ptype, field_list in sorted(ptf.items()): + for _chunk in chunks: + for ptype in sorted(ptf): x = self._get_field((ptype, "particle_position_x")) y = self._get_field((ptype, "particle_position_y")) z = self._get_field((ptype, "particle_position_z")) @@ -90,7 +90,7 @@ def _read_particle_coords(self, chunks, ptf): def _read_particle_fields(self, chunks, ptf, selector): chunks = list(chunks) - for 
chunk in chunks: + for _chunk in chunks: for ptype, field_list in sorted(ptf.items()): x = self._get_field((ptype, "particle_position_x")) y = self._get_field((ptype, "particle_position_y")) @@ -116,7 +116,7 @@ def _get_field(self, field): rp = partial( read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb ) - for i, ax in enumerate("xyz"): + for ax in "xyz": if fname.startswith("particle_position_%s" % ax): dd = self.ds.domain_dimensions[0] off = 1.0 / dd @@ -228,7 +228,7 @@ def _get_field(self, field): rp = partial( read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb ) - for i, ax in enumerate("xyz"): + for ax in "xyz": if fname.startswith("particle_position_%s" % ax): # This is not the same as domain_dimensions dd = self.ds.parameters["ng"] @@ -372,7 +372,7 @@ def cfc(root_level, level, le): if root_level is None: root_level = np.floor(np.log2(le.max() * 1.0 / coarse_grid)) root_level = root_level.astype("int64") - for i in range(10): + for _ in range(10): fc = cfc(root_level, level, le) go = np.diff(np.unique(fc)).min() < 1.1 if go: @@ -426,7 +426,7 @@ def get_ranges( arr_size = np_per_page * real_size idxa, idxb = 0, 0 posa, posb = 0, 0 - for page in range(num_pages): + for _page in range(num_pages): idxb += np_per_page for i, fname in enumerate(["x", "y", "z", "vx", "vy", "vz"]): posb += arr_size diff --git a/yt/frontends/artio/data_structures.py b/yt/frontends/artio/data_structures.py index 0ead88f22ec..faea486ecd2 100644 --- a/yt/frontends/artio/data_structures.py +++ b/yt/frontends/artio/data_structures.py @@ -306,7 +306,7 @@ def _chunk_spatial(self, dobj, ngz, preload_fields=None): if ngz > 0: raise NotImplementedError sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info) - for i, og in enumerate(sobjs): + for og in sobjs: if ngz > 0: g = og.retrieve_ghost_zones(ngz, [], smoothed=True) else: diff --git a/yt/frontends/artio/io.py b/yt/frontends/artio/io.py index fdc605405cc..33b5a1ae1f6 100644 --- a/yt/frontends/artio/io.py +++ b/yt/frontends/artio/io.py @@ -25,7 +25,7 @@ def _read_particle_coords(self, chunks, ptf): for chunk in chunks: # These should be organized by grid filename for subset in chunk.objs: rv = dict(**subset.fill_particles(fields)) - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): x, y, z = ( np.asarray(rv[ptype][pn % ax], dtype="=f8") for ax in "XYZ" ) diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index 2f6a8db0f34..4c0fb90611c 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -443,7 +443,7 @@ def _reconstruct_parent_child(self): g for g in self.grids[mask.astype("bool")] if g.Level == grid.Level + 1 ] mylog.debug("Second pass; identifying parents") - for i, grid in enumerate(self.grids): # Second pass + for grid in self.grids: # Second pass for child in grid.Children: child.Parent.append(grid) @@ -485,7 +485,7 @@ def __init__( units_override = {} # This is for backwards-compatibility already_warned = False - for k, v in list(self.specified_parameters.items()): + for k in list(self.specified_parameters.keys()): if k.endswith("_unit") and k not in units_override: if not already_warned: mylog.warning( diff --git a/yt/frontends/athena_pp/data_structures.py b/yt/frontends/athena_pp/data_structures.py index 004fdaadea7..f41ee1912cd 100644 --- a/yt/frontends/athena_pp/data_structures.py +++ b/yt/frontends/athena_pp/data_structures.py @@ -317,9 +317,8 @@ def _parse_parameter_file(self): self._field_map = {} k 
= 0 - for (i, dname), num_var in zip( - enumerate(self._handle.attrs["DatasetNames"]), - self._handle.attrs["NumVariables"], + for dname, num_var in zip( + self._handle.attrs["DatasetNames"], self._handle.attrs["NumVariables"], ): for j in range(num_var): fname = self._handle.attrs["VariableNames"][k].decode("ascii", "ignore") diff --git a/yt/frontends/athena_pp/io.py b/yt/frontends/athena_pp/io.py index 493c45012de..8da70f46ff2 100644 --- a/yt/frontends/athena_pp/io.py +++ b/yt/frontends/athena_pp/io.py @@ -9,7 +9,7 @@ # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list def grid_sequences(grids): g_iter = sorted(grids, key=lambda g: g.id) - for k, g in groupby(enumerate(g_iter), lambda i_x1: i_x1[0] - i_x1[1].id): + for _, g in groupby(enumerate(g_iter), lambda i_x1: i_x1[0] - i_x1[1].id): seq = list(v[1] for v in g) yield seq diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 6d92c98119f..b4ceeacfd55 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -251,10 +251,10 @@ def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None): self.num_int_base = 2 self.num_real_base = self.dim self.num_real_extra = int(f.readline().strip()) - for i in range(self.num_real_extra): + for _ in range(self.num_real_extra): self.real_component_names.append(f.readline().strip()) self.num_int_extra = int(f.readline().strip()) - for i in range(self.num_int_extra): + for _ in range(self.num_int_extra): self.int_component_names.append(f.readline().strip()) self.num_int = self.num_int_base + self.num_int_extra self.num_real = self.num_real_base + self.num_real_extra @@ -829,7 +829,7 @@ def _parse_header_file(self): base_log = np.log2(self.refine_by) self.level_offsets = [0] # level 0 has to have 0 offset lo = 0 - for lm1, rf in enumerate(self.ref_factors): + for rf in self.ref_factors: lo += int(np.log2(rf) / base_log) - 1 self.level_offsets.append(lo) # assert(np.unique(ref_factors).size == 1) @@ -851,7 +851,7 @@ def _parse_header_file(self): header_file.readline() self._header_mesh_start = header_file.tell() # Skip the cell size information per level - we'll get this later - for i in range(self._max_level + 1): + for _ in range(self._max_level + 1): header_file.readline() # Get the geometry next_line = header_file.readline() diff --git a/yt/frontends/chombo/data_structures.py b/yt/frontends/chombo/data_structures.py index 9df3c3d8167..7a9014c09f5 100644 --- a/yt/frontends/chombo/data_structures.py +++ b/yt/frontends/chombo/data_structures.py @@ -115,7 +115,7 @@ def _read_particles(self): (particles_per_grid, level_particles) ) - for i, grid in enumerate(self.grids): + for i, _grid in enumerate(self.grids): self.grids[i].NumberOfParticles = particles_per_grid[i] self.grid_particle_count[i] = particles_per_grid[i] @@ -527,7 +527,7 @@ def _parse_parameter_file(self): ): self.periodicity[il] = ll.split()[1] == "periodic" self.periodicity = tuple(self.periodicity) - for il, ll in enumerate(lines[lines.index("[Parameters]") + 2 :]): + for ll in lines[lines.index("[Parameters]") + 2 :]: if ll.split()[0] == "GAMMA": self.gamma = float(ll.split()[1]) self.domain_left_edge = domain_left_edge @@ -684,7 +684,7 @@ def _parse_inputs_file(self, ini_filename): self.ini_filename = self._localize(self.ini_filename, ini_filename) lines = open(self.ini_filename).readlines() # read the file line by line, storing important parameters - for lineI, line in enumerate(lines): + for line in 
lines: try: param, sep, vals = line.partition("=") if not sep: diff --git a/yt/frontends/enzo/io.py b/yt/frontends/enzo/io.py index fa25fd55104..2fdb62bbded 100644 --- a/yt/frontends/enzo/io.py +++ b/yt/frontends/enzo/io.py @@ -255,7 +255,7 @@ def _read_particle_coords(self, chunks, ptf): nap = sum(g.NumberOfActiveParticles.values()) if g.NumberOfParticles == 0 and nap == 0: continue - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): x, y, z = ( self.grids_in_memory[g.id]["particle_position_x"], self.grids_in_memory[g.id]["particle_position_y"], diff --git a/yt/frontends/enzo_p/data_structures.py b/yt/frontends/enzo_p/data_structures.py index 795db893bdc..3106b201870 100644 --- a/yt/frontends/enzo_p/data_structures.py +++ b/yt/frontends/enzo_p/data_structures.py @@ -144,7 +144,7 @@ def _count_grids(self): f.seek(0) offset = f.tell() ngrids = 0 - for ib in range(nblocks): + for _ in range(nblocks): my_block = min(fblock_size, file_size - offset) buff = f.read(my_block) ngrids += buff.count("\n") @@ -171,11 +171,11 @@ def _parse_index(self): child_id = nroot_blocks last_pid = None - for ib in range(nblocks): + for _ib in range(nblocks): fblock = min(fblock_size, file_size - offset) buff = lstr + f.read(fblock) bnl = 0 - for inl in range(buff.count("\n")): + for _inl in range(buff.count("\n")): nnl = buff.find("\n", bnl) line = buff[bnl:nnl] block_name, block_file = line.split() diff --git a/yt/frontends/enzo_p/tests/test_misc.py b/yt/frontends/enzo_p/tests/test_misc.py index 75e4ecd393b..d8b236ae7d5 100644 --- a/yt/frontends/enzo_p/tests/test_misc.py +++ b/yt/frontends/enzo_p/tests/test_misc.py @@ -73,7 +73,7 @@ def flip_random_block_bit(block, rs): def test_get_block_info(): rs = np.random.RandomState(45047) max_n = 64 - for i in range(10): + for _ in range(10): n, l, b = get_random_block_string(max_n=max_n, random_state=rs) level, left, right = get_block_info(b, min_dim=1) assert level == l @@ -108,7 +108,7 @@ def test_is_parent(): max_n = 2 ** i descriptors = [] - for j in range(dim): + for _ in range(dim): n1, l1, b1 = get_random_block_string( max_n=max_n, random_state=rs, level=0 ) @@ -131,7 +131,7 @@ def test_nested_dict_get(): rs = np.random.RandomState(47988) keys = [] my_dict = None - for i in range(5): + for _ in range(5): k = str(rs.randint(0, high=1000000)) if my_dict is None: v = str(rs.randint(0, high=1000000)) diff --git a/yt/frontends/fits/misc.py b/yt/frontends/fits/misc.py index ae985bf22fb..19775cd0385 100644 --- a/yt/frontends/fits/misc.py +++ b/yt/frontends/fits/misc.py @@ -286,7 +286,7 @@ def _repr_html_(self): from yt.visualization._mpl_imports import FigureCanvasAgg ret = "" - for k, v in self.plots.items(): + for v in self.plots.values(): canvas = FigureCanvasAgg(v) f = BytesIO() canvas.print_figure(f) diff --git a/yt/frontends/flash/io.py b/yt/frontends/flash/io.py index e72ca882750..63d24fd0d2f 100644 --- a/yt/frontends/flash/io.py +++ b/yt/frontends/flash/io.py @@ -10,14 +10,14 @@ # http://stackoverflow.com/questions/2361945/detecting-consecutive-integers-in-a-list def particle_sequences(grids): g_iter = sorted(grids, key=lambda g: g.id) - for k, g in groupby(enumerate(g_iter), lambda i_x: i_x[0] - i_x[1].id): + for _k, g in groupby(enumerate(g_iter), lambda i_x: i_x[0] - i_x[1].id): seq = list(v[1] for v in g) yield seq[0], seq[-1] def grid_sequences(grids): g_iter = sorted(grids, key=lambda g: g.id) - for k, g in groupby(enumerate(g_iter), lambda i_x1: i_x1[0] - i_x1[1].id): + for _k, g in groupby(enumerate(g_iter), lambda i_x1: i_x1[0] 
- i_x1[1].id): seq = list(v[1] for v in g) yield seq diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 40c4f7c41d1..8f74bc678e3 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -53,7 +53,7 @@ def _read_particle_coords(self, chunks, ptf): si, ei = data_file.start, data_file.end f = h5py.File(data_file.filename, mode="r") # This double-reads - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): if data_file.total_particles[ptype] == 0: continue c = f["/%s/Coordinates" % ptype][si:ei, :].astype("float64") diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index ee621e6cb0e..72f58d81092 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -28,7 +28,7 @@ def _read_particle_coords(self, chunks, ptf): data_files.update(obj.data_files) for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): coords = data_file._get_particle_positions(ptype, f=f) if coords is None: continue diff --git a/yt/frontends/gamer/io.py b/yt/frontends/gamer/io.py index 933d5ccf53d..04e9ea10677 100644 --- a/yt/frontends/gamer/io.py +++ b/yt/frontends/gamer/io.py @@ -14,13 +14,13 @@ # group grids with consecutive indices together to improve the I/O performance # --> grids are assumed to be sorted into ascending numerical order already def grid_sequences(grids): - for k, g in groupby(enumerate(grids), lambda i_x: i_x[0] - i_x[1].id): + for _k, g in groupby(enumerate(grids), lambda i_x: i_x[0] - i_x[1].id): seq = list(v[1] for v in g) yield seq def particle_sequences(grids): - for k, g in groupby(enumerate(grids), lambda i_x: i_x[0] - i_x[1].id): + for _k, g in groupby(enumerate(grids), lambda i_x: i_x[0] - i_x[1].id): seq = list(v[1] for v in g) yield seq[0], seq[-1] diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index f0ff85e8480..ea775907d5f 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -108,7 +108,7 @@ def _parse_index(self): def _populate_grid_objects(self): mask = np.empty(self.grids.size, dtype="int32") - for gi, g in enumerate(self.grids): + for g in self.grids: g._prepare_grid() g._setup_dx() diff --git a/yt/frontends/moab/tests/test_c5.py b/yt/frontends/moab/tests/test_c5.py index 5fe8c0789ed..447fea37388 100644 --- a/yt/frontends/moab/tests/test_c5.py +++ b/yt/frontends/moab/tests/test_c5.py @@ -40,8 +40,8 @@ def test_cantor_5(): DRE = ds.domain_right_edge ray = ds.ray(DLE + offset_1 * DLE.uq, DRE - offset_2 * DRE.uq) assert_almost_equal(ray["dts"].sum(dtype="float64"), 1.0, 8) - for i, p1 in enumerate(np.random.random((5, 3))): - for j, p2 in enumerate(np.random.random((5, 3))): + for p1 in np.random.random((5, 3)): + for p2 in np.random.random((5, 3)): ray = ds.ray(p1, p2) assert_almost_equal(ray["dts"].sum(dtype="float64"), 1.0, 8) for field in _fields: diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index 2821c7f3c86..8ce016f1730 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -250,7 +250,7 @@ def _count_grids(self): self.vpg = int(self.dataset.gridsize / 4) # 4Byte per value (f32) # Meshes of the same size do not need separate chunks - for (shape, spacing, offset, unit_si) in set(self.meshshapes.values()): + for (shape, _spacing, _offset, _unit_si) in 
set(self.meshshapes.values()): self.num_grids += min( shape[0], int(np.ceil(reduce(mul, shape) * self.vpg ** -1)) ) diff --git a/yt/frontends/owls_subfind/io.py b/yt/frontends/owls_subfind/io.py index e9f46d5a124..d8a27da11db 100644 --- a/yt/frontends/owls_subfind/io.py +++ b/yt/frontends/owls_subfind/io.py @@ -26,7 +26,7 @@ def _read_particle_coords(self, chunks, ptf): data_files.update(obj.data_files) for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): pcount = data_file.total_particles[ptype] coords = f[ptype]["CenterOfMass"][()].astype("float64") coords = np.resize(coords, (pcount, 3)) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a862424f114..30dbda5ee9e 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -420,7 +420,7 @@ def _chunk_all(self, dobj): def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None): sobjs = getattr(dobj._current_chunk, "objs", dobj._chunk_info) - for i, og in enumerate(sobjs): + for og in sobjs: if ngz > 0: g = og.retrieve_ghost_zones(ngz, [], smoothed=True) else: @@ -670,10 +670,10 @@ def read_rhs(f, cast): rheader[p.strip()] = cast(v.strip()) with open(self.parameter_filename) as f: - for i in range(6): + for _ in range(6): read_rhs(f, int) f.readline() - for i in range(11): + for _ in range(11): read_rhs(f, float) f.readline() read_rhs(f, str) @@ -687,7 +687,7 @@ def read_rhs(f, cast): self.hilbert_indices = {} if rheader["ordering type"] == "hilbert": f.readline() # header - for n in range(rheader["ncpu"]): + for _ in range(rheader["ncpu"]): dom, mi, ma = f.readline().split() self.hilbert_indices[int(dom)] = (float(mi), float(ma)) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index c076e3c585f..a2bc835aee0 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -492,17 +492,17 @@ def read_rhs(cast): with open(fname, "r") as f: # Read nRTvar, nions, ngroups, iions - for i in range(4): + for _ in range(4): read_rhs(int) f.readline() # Read X and Y fractions - for i in range(2): + for _ in range(2): read_rhs(float) f.readline() # Reat unit_np, unit_pfd - for i in range(2): + for _ in range(2): read_rhs(float) # Read rt_c_frac @@ -512,7 +512,7 @@ def read_rhs(cast): f.readline() # Read n star, t2star, g_star - for i in range(3): + for _ in range(3): read_rhs(float) # Touchy part, we have to read the photon group properties diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 58752a15d83..2ea59228d45 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -244,7 +244,7 @@ def test_ramses_sink(): ad = ds.all_data() for field in expected_fields: - assert ("sink", "field") not in ds.field_list + assert ("sink", field) not in ds.field_list ramses_new_format = "ramses_new_format/output_00002/info_00002.txt" diff --git a/yt/frontends/sdf/io.py b/yt/frontends/sdf/io.py index 8ffbcc6e379..4398acf44bd 100644 --- a/yt/frontends/sdf/io.py +++ b/yt/frontends/sdf/io.py @@ -27,7 +27,7 @@ def _read_particle_coords(self, chunks, ptf): for obj in chunk.objs: data_files.update(obj.data_files) assert len(data_files) == 1 - for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + for _data_file in sorted(data_files, 
key=lambda x: (x.filename, x.start)): yield "dark_matter", ( self._handle["x"], self._handle["y"], @@ -43,7 +43,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for obj in chunk.objs: data_files.update(obj.data_files) assert len(data_files) == 1 - for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): + for _data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): for ptype, field_list in sorted(ptf.items()): x = self._handle["x"] y = self._handle["y"] @@ -110,7 +110,7 @@ def _read_particle_coords(self, chunks, ptf): for obj in chunk.objs: data_files.update(obj.data_files) assert len(data_files) == 1 - for data_file in data_files: + for _data_file in data_files: pcount = self._handle["x"].size yield "dark_matter", ( self._handle["x"][:pcount], @@ -127,7 +127,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for obj in chunk.objs: data_files.update(obj.data_files) assert len(data_files) == 1 - for data_file in data_files: + for _data_file in data_files: pcount = self._handle["x"].size for ptype, field_list in sorted(ptf.items()): x = self._handle["x"][:pcount] @@ -169,7 +169,7 @@ def _read_particle_fields(self, chunks, ptf, selector): dle = self.ds.domain_left_edge.in_units("code_length").d dre = self.ds.domain_right_edge.in_units("code_length").d required_fields = [] - for ptype, field_list in sorted(ptf.items()): + for field_list in sorted(ptf.values()): for field in field_list: if field == "mass": continue diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index f8a2e88749e..2575d51a890 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -435,7 +435,7 @@ def assign_particle_data(ds, pdata, bbox): ) grid_pdata = [] - for i in range(num_grids): + for _ in range(num_grids): grid = {"number_of_particles": 0} grid_pdata.append(grid) @@ -1764,7 +1764,7 @@ def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None): # This is where we will perform cutting of the Octree and # load-balancing. That may require a specialized selector object to # cut based on some space-filling curve index. 
- for i, og in enumerate(sobjs): + for og in sobjs: if ngz > 0: g = og.retrieve_ghost_zones(ngz, [], smoothed=True) else: diff --git a/yt/frontends/stream/io.py b/yt/frontends/stream/io.py index ebd4c15f3d3..0738292fc84 100644 --- a/yt/frontends/stream/io.py +++ b/yt/frontends/stream/io.py @@ -58,7 +58,7 @@ def _read_particle_coords(self, chunks, ptf): if g.NumberOfParticles == 0: continue gf = self.fields[g.id] - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): if (ptype, "particle_position") in gf: x, y, z = gf[ptype, "particle_position"].T else: @@ -109,7 +109,7 @@ def _read_particle_coords(self, chunks, ptf): ): f = self.fields[data_file.filename] # This double-reads - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): yield ptype, ( f[ptype, "particle_position_x"], f[ptype, "particle_position_y"], diff --git a/yt/frontends/swift/io.py b/yt/frontends/swift/io.py index 92842dbb60d..b0582375a8c 100644 --- a/yt/frontends/swift/io.py +++ b/yt/frontends/swift/io.py @@ -30,7 +30,7 @@ def _read_particle_coords(self, chunks, ptf): si, ei = sub_file.start, sub_file.end f = h5py.File(sub_file.filename, "r") # This double-reads - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): if sub_file.total_particles[ptype] == 0: continue pos = f["/%s/Coordinates" % ptype][si:ei, :] diff --git a/yt/frontends/tipsy/io.py b/yt/frontends/tipsy/io.py index b31ede5e704..6415850b383 100644 --- a/yt/frontends/tipsy/io.py +++ b/yt/frontends/tipsy/io.py @@ -96,9 +96,7 @@ def _read_particle_coords(self, chunks, ptf): poff = data_file.field_offsets tp = data_file.total_particles f = open(data_file.filename, "rb") - for ptype, field_list in sorted( - ptf.items(), key=lambda a: poff.get(a[0], -1) - ): + for ptype in sorted(ptf, key=lambda a: poff.get(a, -1)): if data_file.total_particles[ptype] == 0: continue f.seek(poff[ptype]) @@ -246,7 +244,7 @@ def _update_domain(self, data_file): f.seek(ds._header_offset) mi = np.array([1e30, 1e30, 1e30], dtype="float64") ma = -np.array([1e30, 1e30, 1e30], dtype="float64") - for iptype, ptype in enumerate(self._ptypes): + for ptype in self._ptypes: # We'll just add the individual types separately count = data_file.total_particles[ptype] if count == 0: @@ -288,7 +286,7 @@ def _update_domain(self, data_file): def _yield_coordinates(self, data_file, needed_ptype=None): with open(data_file.filename, "rb") as f: poff = data_file.field_offsets - for iptype, ptype in enumerate(self._ptypes): + for ptype in self._ptypes: if ptype not in poff: continue f.seek(poff[ptype]) @@ -438,7 +436,7 @@ def _calculate_particle_offsets(self, data_file, pcounts): pos = data_file.ds._header_offset global_offsets = {} field_offsets = {} - for i, ptype in enumerate(self._ptypes): + for ptype in self._ptypes: if ptype not in self._pdtypes: # This means we don't have any, I think, and so we shouldn't # stick it in the offsets. 
diff --git a/yt/frontends/ytdata/io.py b/yt/frontends/ytdata/io.py index 300123acbda..987fd9d8a22 100644 --- a/yt/frontends/ytdata/io.py +++ b/yt/frontends/ytdata/io.py @@ -200,7 +200,7 @@ def _read_particle_coords(self, chunks, ptf): data_files.update(obj.data_files) for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): pcount = data_file.total_particles[ptype] if pcount == 0: continue @@ -318,7 +318,7 @@ def _read_particle_coords(self, chunks, ptf): data_files.update(obj.data_files) for data_file in sorted(data_files, key=lambda x: (x.filename, x.start)): with h5py.File(data_file.filename, mode="r") as f: - for ptype, field_list in sorted(ptf.items()): + for ptype in sorted(ptf): pcount = data_file.total_particles[ptype] if pcount == 0: continue diff --git a/yt/geometry/geometry_handler.py b/yt/geometry/geometry_handler.py index ae7b0f0534d..7763a2cb993 100644 --- a/yt/geometry/geometry_handler.py +++ b/yt/geometry/geometry_handler.py @@ -451,7 +451,7 @@ def __iter__(self): def __next__(self): if len(self.queue) == 0: - for i in range(self.max_length): + for _ in range(self.max_length): try: self.queue.append(next(self.base_iter)) except StopIteration: diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 46a0d83fb72..87db8ff1538 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -391,7 +391,7 @@ def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None): preload_fields, _ = self._split_fields(preload_fields) if self._preload_implemented and len(preload_fields) > 0 and ngz == 0: giter = ChunkDataCache(list(giter), preload_fields, self) - for i, og in enumerate(giter): + for og in giter: if ngz > 0: g = og.retrieve_ghost_zones(ngz, [], smoothed=True) else: diff --git a/yt/geometry/tests/test_particle_octree.py b/yt/geometry/tests/test_particle_octree.py index ec6719b7b89..0bf46234bbf 100644 --- a/yt/geometry/tests/test_particle_octree.py +++ b/yt/geometry/tests/test_particle_octree.py @@ -37,7 +37,7 @@ def test_add_particles_random(): for ndom in [1, 2, 4, 8]: octree = ParticleOctreeContainer((1, 1, 1), DLE, DRE) octree.n_ref = 32 - for dom, split in enumerate(np.array_split(morton, ndom)): + for split in np.array_split(morton, ndom): octree.add(split) octree.finalize() # This visits every oct. @@ -560,7 +560,7 @@ def fake_decomp_hilbert_gaussian( gpos = np.clip( np.random.normal(DLE[k] + DW[k] / 2.0, DW[k] / 10.0, npart), DLE[k], DRE[k] ) - for p, ipos in enumerate(gpos): + for ipos in gpos: for i in range(len(hlist)): if iLE[i, k] <= ipos < iRE[i, k]: pos[count[k], k] = ipos diff --git a/yt/geometry/unstructured_mesh_handler.py b/yt/geometry/unstructured_mesh_handler.py index 84890b0962a..881a6706cfb 100644 --- a/yt/geometry/unstructured_mesh_handler.py +++ b/yt/geometry/unstructured_mesh_handler.py @@ -65,7 +65,7 @@ def _chunk_spatial(self, dobj, ngz, sort=None, preload_fields=None): # This is where we will perform cutting of the Octree and # load-balancing. That may require a specialized selector object to # cut based on some space-filling curve index. 
- for i, og in enumerate(sobjs): + for og in sobjs: if ngz > 0: g = og.retrieve_ghost_zones(ngz, [], smoothed=True) else: diff --git a/yt/pmods.py b/yt/pmods.py index 1a96f783f08..8e94ce0d71a 100644 --- a/yt/pmods.py +++ b/yt/pmods.py @@ -307,7 +307,7 @@ def __determine_parent__(globals, level): if "." in pname: if level > 0: end = len(pname) - for l in range(level): + for _ in range(level): i = pname.rfind(".", 0, end) end = i else: diff --git a/yt/testing.py b/yt/testing.py index 2588734d4aa..97cf8656995 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -585,7 +585,7 @@ def construct_octree_mask(prng=RandomState(0x1D3D3D3), refined=None): # noqa B0 return refined # Loop over subcells - for subcell in range(8): + for _ in range(8): # Insert criterion for whether cell should be sub-divided. Here we # just use a random number to demonstrate. divide = prng.random_sample() < 0.12 diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index d5725392a56..660cf358076 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -214,7 +214,7 @@ def get(self, ds_name, default=None): except urllib.error.HTTPError: raise YTNoOldAnswer(url) else: - for this_try in range(3): + for _ in range(3): try: data = resp.read() except Exception: @@ -780,7 +780,7 @@ def compare(self, new_result, old_result): def compare_image_lists(new_result, old_result, decimals): fns = [] - for i in range(2): + for _ in range(2): tmpfd, tmpname = tempfile.mkstemp(suffix=".png") os.close(tmpfd) fns.append(tmpname) diff --git a/yt/utilities/flagging_methods.py b/yt/utilities/flagging_methods.py index 159417d3136..01743a5a72e 100644 --- a/yt/utilities/flagging_methods.py +++ b/yt/utilities/flagging_methods.py @@ -52,7 +52,7 @@ def find_subgrids(self): continue while not psg.acceptable: new_psgs = [] - for i, dim in enumerate(np.argsort(psg.dimensions)[::-1]): + for dim in np.argsort(psg.dimensions)[::-1]: new_psgs = psg.find_by_zero_signature(dim) if len(new_psgs) > 1: break diff --git a/yt/utilities/fortran_utils.py b/yt/utilities/fortran_utils.py index 807b036a9ca..2b3325544f9 100644 --- a/yt/utilities/fortran_utils.py +++ b/yt/utilities/fortran_utils.py @@ -52,7 +52,7 @@ def read_attrs(f, attrs, endian="="): """ vv = {} net_format = endian - for a, n, t in attrs: + for _a, n, t in attrs: for end in "@=<>": t = t.replace(end, "") net_format += "".join(["I"] + ([t] * n) + ["I"]) @@ -130,7 +130,7 @@ def read_cattrs(f, attrs, endian="="): """ vv = {} net_format = endian - for a, n, t in attrs: + for _a, n, t in attrs: for end in "@=<>": t = t.replace(end, "") net_format += "".join([t] * n) @@ -315,7 +315,7 @@ def read_record(f, rspec, endian="="): """ vv = {} net_format = endian + "I" - for a, n, t in rspec: + for _a, n, t in rspec: t = t if len(t) == 1 else t[-1] net_format += "%s%s" % (n, t) net_format += "I" @@ -331,7 +331,7 @@ def read_record(f, rspec, endian="="): s2, ) pos = 0 - for a, n, t in rspec: + for a, n, _t in rspec: vv[a] = vals[pos : pos + n] pos += n return vv diff --git a/yt/utilities/lib/tests/test_geometry_utils.py b/yt/utilities/lib/tests/test_geometry_utils.py index 24467d7383c..14011ba55aa 100644 --- a/yt/utilities/lib/tests/test_geometry_utils.py +++ b/yt/utilities/lib/tests/test_geometry_utils.py @@ -946,7 +946,7 @@ def test_knn_direct(seed=1): rad = np.arange(N, dtype=np.float64) pos = np.vstack(3 * [rad ** 2 / 3.0]).T sort_shf = np.arange(N, dtype=np.uint64) - for i in range(20): + for _ in range(20): 
np.random.shuffle(sort_shf) sort_ans = np.argsort(sort_shf)[:k] sort_out = knn_direct(pos[sort_shf, :], k, sort_ans[0], idx) diff --git a/yt/utilities/nodal_data_utils.py b/yt/utilities/nodal_data_utils.py index 36e27e00973..91a86b5564f 100644 --- a/yt/utilities/nodal_data_utils.py +++ b/yt/utilities/nodal_data_utils.py @@ -44,10 +44,10 @@ def get_nodal_slices(shape, nodal_flag, dim): else: dir_slices[i] = [slice(0, shape[i])] - for i, sl_i in enumerate(dir_slices[0]): - for j, sl_j in enumerate(dir_slices[1]): + for sl_i in dir_slices[0]: + for sl_j in dir_slices[1]: if dim > 2: - for k, sl_k in enumerate(dir_slices[2]): + for sl_k in dir_slices[2]: slices.append([sl_i, sl_j, sl_k]) else: slices.append([sl_i, sl_j]) diff --git a/yt/utilities/parallel_tools/parallel_analysis_interface.py b/yt/utilities/parallel_tools/parallel_analysis_interface.py index 327f7f1eca5..224d6f4ea2d 100644 --- a/yt/utilities/parallel_tools/parallel_analysis_interface.py +++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py @@ -404,7 +404,7 @@ def free_workgroup(self, workgroup): def free_all(self): for wg in self.workgroups: self.free_workgroup(wg) - for i in range(len(self.workgroups)): + while self.workgroups: self.workgroups.pop(0) @classmethod @@ -915,7 +915,7 @@ def mpi_Request_Waitall(self, hooks): MPI.Request.Waitall(hooks) def mpi_Request_Waititer(self, hooks): - for i in range(len(hooks)): + for _hook in hooks: req = MPI.Request.Waitany(hooks) yield req @@ -1351,7 +1351,7 @@ def inc(self, n=-1): old_item = self.item if n == -1: n = self.comm.size - for i in range(n): + for _ in range(n): if self.pointer >= self.num_items - self.comm.size: break self.owned[self.pointer % self.comm.size] += self.comm.size @@ -1363,7 +1363,7 @@ def dec(self, n=-1): old_item = self.item if n == -1: n = self.comm.size - for i in range(n): + for _ in range(n): if self.pointer == 0: break self.owned[(self.pointer - 1) % self.comm.size] -= self.comm.size diff --git a/yt/utilities/tests/test_selectors.py b/yt/utilities/tests/test_selectors.py index f21b4eea78b..c383ae52af3 100644 --- a/yt/utilities/tests/test_selectors.py +++ b/yt/utilities/tests/test_selectors.py @@ -145,7 +145,7 @@ def test_cutting_plane_selector(): assert all(ds.periodicity) # test cutting plane against orthogonal plane - for i, d in enumerate("xyz"): + for i in range(3): norm = np.zeros(3) norm[i] = 1.0 diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index 251c90efaba..2b2b517773c 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -1288,19 +1288,19 @@ def multiplot( if not _yt: if xranges is None: xranges = [] - for i in range(npanels): + for _ in range(npanels): xranges.append((0, 1)) if yranges is None: yranges = [] - for i in range(npanels): + for _ in range(npanels): yranges.append((0, 1)) if xlabels is None: xlabels = [] - for i in range(npanels): + for _ in range(npanels): xlabels.append("") if ylabels is None: ylabels = [] - for i in range(npanels): + for _ in range(npanels): ylabels.append("") d = DualEPS(figsize=figsize) diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index bd5da9b60cb..92abaabef92 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -499,7 +499,7 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): if suffix is None: suffix = get_image_suffix(name) if suffix != "": - for k, v in self.plots.items(): + for v in self.plots.values(): names.append(v.save(name, mpl_kwargs)) return 
names if hasattr(self.data_source, "axis"): @@ -555,7 +555,7 @@ def show(self): """ interactivity = self.plots[list(self.plots.keys())[0]].interactivity if interactivity: - for k, v in sorted(self.plots.items()): + for v in sorted(self.plots.values()): v.show() else: if "__IPYTHON__" in dir(builtins): diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index 8cccaac83f1..5f79e4abbce 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -779,7 +779,7 @@ def __call__(self, plot): else: pxs, pys = np.mgrid[0:0:1j, 0:0:1j] GLE, GRE, levels, block_ids = [], [], [], [] - for block, mask in plot.data.blocks: + for block, _mask in plot.data.blocks: GLE.append(block.LeftEdge.in_units("code_length")) GRE.append(block.RightEdge.in_units("code_length")) levels.append(block.Level) diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index f7b69e05215..bec52d51dfa 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -355,10 +355,10 @@ def _repr_html_(self): ret = "" unique = set(self.plots.values()) if len(unique) < len(self.plots): - iters = zip(range(len(unique)), sorted(unique)) + iters = sorted(unique) else: - iters = self.plots.items() - for uid, plot in iters: + iters = self.plots.values() + for plot in iters: with matplotlib_style_context(): img = plot._repr_png_() img = base64.b64encode(img).decode() @@ -1303,7 +1303,7 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): if suffix is None: suffix = get_image_suffix(name) if suffix != "": - for k, v in self.plots.items(): + for v in self.plots.values(): names.append(v.save(name, mpl_kwargs)) return names else: diff --git a/yt/visualization/tests/test_particle_plot.py b/yt/visualization/tests/test_particle_plot.py index 16f41c40ef4..ed78f790458 100644 --- a/yt/visualization/tests/test_particle_plot.py +++ b/yt/visualization/tests/test_particle_plot.py @@ -383,7 +383,7 @@ def test_particle_plot_wf(self): def test_creation_with_width(self): test_ds = fake_particle_ds() - for width, (xlim, ylim, pwidth, aun) in WIDTH_SPECS.items(): + for width, (xlim, ylim, pwidth, _aun) in WIDTH_SPECS.items(): plot = ParticleProjectionPlot(test_ds, 0, "particle_mass", width=width) xlim = [plot.ds.quan(el[0], el[1]) for el in xlim] diff --git a/yt/visualization/volume_rendering/UBVRI.py b/yt/visualization/volume_rendering/UBVRI.py index d263e45a720..33a273fb167 100644 --- a/yt/visualization/volume_rendering/UBVRI.py +++ b/yt/visualization/volume_rendering/UBVRI.py @@ -681,7 +681,7 @@ ), ) -for filter, vals in johnson_filters.items(): +for vals in johnson_filters.values(): wavelen = vals["wavelen"] trans = vals["trans"] vals["Lchar"] = wavelen[np.argmax(trans)] diff --git a/yt/visualization/volume_rendering/interactive_loop.py b/yt/visualization/volume_rendering/interactive_loop.py index cf590caea54..37c6e7040e3 100644 --- a/yt/visualization/volume_rendering/interactive_loop.py +++ b/yt/visualization/volume_rendering/interactive_loop.py @@ -84,9 +84,7 @@ def setup_loop(self, scene, camera): return callbacks def start_loop(self, scene, camera): - callbacks = self.setup_loop(scene, camera) - for i in self(scene, camera, callbacks): - pass + self.setup_loop(scene, camera) def __call__(self, scene, camera, callbacks): camera.compute_matrices() @@ -180,9 +178,7 @@ def setup_loop(self, scene, camera): return callbacks def start_loop(self, scene, camera): - callbacks = self.setup_loop(scene, camera) - for i 
in self(scene, camera, callbacks): - pass + self.setup_loop(scene, camera) def __call__(self, scene, camera, callbacks): while not glfw.WindowShouldClose(self.window) or self.should_quit: diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 26d4328c93a..09044838325 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -367,7 +367,7 @@ def temp_weightfield(a, b): mylog.debug("Casting rays") - for i, (grid, mask) in enumerate(data_source.blocks): + for (grid, mask) in data_source.blocks: data = [] for f in fields: # strip units before multiplying by mask for speed diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 14b6d611ead..3136bb69daf 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -299,7 +299,7 @@ def draw_grids(self, im, alpha=0.3, cmap=None, min_level=None, max_level=None): region = self.data_source corners = [] levels = [] - for block, mask in region.blocks: + for block, _mask in region.blocks: block_corners = np.array( [ [block.LeftEdge[0], block.LeftEdge[1], block.LeftEdge[2]], @@ -972,7 +972,7 @@ def zoomin(self, final, n_steps, clip_ratio=None): ... iw.write_bitmap(snapshot, "zoom_%04i.png" % i) """ f = final ** (1.0 / n_steps) - for i in range(n_steps): + for _ in range(n_steps): self.zoom(f) yield self.snapshot(clip_ratio=clip_ratio) @@ -1035,7 +1035,7 @@ def move_to( else: dW = self.ds.arr([0.0, 0.0, 0.0], "code_length") dx = (final - self.center) * 1.0 / n_steps - for i in range(n_steps): + for _ in range(n_steps): if exponential: self.switch_view(center=self.center * dx, width=self.width * dW) else: @@ -1177,7 +1177,7 @@ def rotation(self, theta, n_steps, rot_vector=None, clip_ratio=None): """ dtheta = (1.0 * theta) / n_steps - for i in range(n_steps): + for _ in range(n_steps): self.rotate(dtheta, rot_vector=rot_vector) yield self.snapshot(clip_ratio=clip_ratio) @@ -2220,7 +2220,7 @@ def _render(self, double_check, num_threads, image, sampler): # Now we have a bounding box. 
data_source = ds.region(self.center, mi, ma) - for i, (grid, mask) in enumerate(data_source.blocks): + for (grid, mask) in data_source.blocks: data = [(grid[field] * mask).astype("float64") for field in fields] pg = PartitionedGrid( grid.id, diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 9af84e93193..871bfd98862 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -1185,7 +1185,7 @@ def __init__( self.data_source = data_source_or_all(data_source) corners = [] levels = [] - for block, mask in self.data_source.blocks: + for block, _mask in self.data_source.blocks: block_corners = np.array( [ [block.LeftEdge[0], block.LeftEdge[1], block.LeftEdge[2]], diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 27274eb0b87..b6b4aa95379 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -539,7 +539,7 @@ def _annotate(self, ax, tf, source, label="", label_fmt=None): def _validate(self): r"""Validate the current state of the scene.""" - for k, source in self.sources.items(): + for source in self.sources.values(): source._validate() return @@ -577,11 +577,11 @@ def composite(self, camera=None): empty = camera.lens.new_image(camera) opaque = ZBuffer(empty, np.full(empty.shape[:2], np.inf)) - for k, source in self.opaque_sources: + for _, source in self.opaque_sources: source.render(camera, zbuffer=opaque) im = source.zbuffer.rgba - for k, source in self.transparent_sources: + for _, source in self.transparent_sources: im = source.render(camera, zbuffer=opaque) opaque.rgba = im @@ -816,7 +816,7 @@ def annotate_mesh_lines(self, color=None, alpha=1.0): The opacity of the mesh lines. Default is 255 (solid). 
""" - for k, source in self.opaque_sources: + for _, source in self.opaque_sources: if isinstance(source, MeshSource): source.annotate_mesh_lines(color=color, alpha=alpha) return self diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index ba90d078866..cc8b511fcee 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -65,7 +65,7 @@ def test_rotation_volume_rendering(self): angle = 2 * np.pi frames = 4 - for i in range(frames): + for _ in range(frames): sc.camera.yaw(angle / frames) sc.render() diff --git a/yt/visualization/volume_rendering/tests/test_vr_orientation.py b/yt/visualization/volume_rendering/tests/test_vr_orientation.py index 22736096f7d..f9973efbc28 100644 --- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py +++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py @@ -52,7 +52,7 @@ def test_orientation(): test1.answer_name = test_name yield test1 - for i in range(n_frames): + for _ in range(n_frames): frame += 1 center = ds.arr([0, 0, 0], "code_length") cam.yaw(theta, rot_center=center) @@ -61,7 +61,7 @@ def test_orientation(): test2.answer_name = test_name yield test2 - for i in range(n_frames): + for _ in range(n_frames): frame += 1 theta = np.pi / n_frames center = ds.arr([0, 0, 0], "code_length") @@ -71,7 +71,7 @@ def test_orientation(): test3.answer_name = test_name yield test3 - for i in range(n_frames): + for _ in range(n_frames): frame += 1 theta = np.pi / n_frames center = ds.arr([0, 0, 0], "code_length") From a2073d6da55d5049442be651d0d48dae95e934ed Mon Sep 17 00:00:00 2001 From: Chris Evans Date: Sat, 1 Aug 2020 14:07:55 -0400 Subject: [PATCH 181/653] Correct child mask for sibling grids --- yt/data_objects/grid_patch.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/grid_patch.py b/yt/data_objects/grid_patch.py index a80d461f71d..a82b87965af 100644 --- a/yt/data_objects/grid_patch.py +++ b/yt/data_objects/grid_patch.py @@ -214,7 +214,7 @@ def child_mask(self): for child in self.Children: self._fill_child_mask(child, child_mask, 0) for sibling in self.OverlappingSiblings or []: - self._fill_child_mask(sibling, child_mask, 0) + self._fill_child_mask(sibling, child_mask, 0, dlevel=0) return child_mask @property @@ -232,7 +232,7 @@ def child_index_mask(self): for child in self.Children: self._fill_child_mask(child, child_index_mask, child.id) for sibling in self.OverlappingSiblings or []: - self._fill_child_mask(sibling, child_index_mask, sibling.id) + self._fill_child_mask(sibling, child_index_mask, sibling.id, dlevel=0) return child_index_mask def retrieve_ghost_zones(self, n_zones, fields, all_levels=False, smoothed=False): From b6f50e55a4f3240aaf0eb287f03234e2d2aa7d05 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 11:16:25 +0200 Subject: [PATCH 182/653] avoid confusing enumerate Co-authored-by: Corentin Cadiou --- yt/data_objects/tests/test_ortho_rays.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_ortho_rays.py b/yt/data_objects/tests/test_ortho_rays.py index 5ba000ed3be..63ffecafe80 100644 --- a/yt/data_objects/tests/test_ortho_rays.py +++ b/yt/data_objects/tests/test_ortho_rays.py @@ -8,7 +8,7 @@ def test_ortho_ray(): dx = (ds.domain_right_edge - ds.domain_left_edge) / ds.domain_dimensions axes = ["x", "y", "z"] - for ax, _an in enumerate(axes): + for ax in 
range(3): ocoord = ds.arr(np.random.random(2), "code_length") my_oray = ds.ortho_ray(ax, ocoord) From 4473d0f56b11103fa790fc003e7790dfd97f39e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 11:28:16 +0200 Subject: [PATCH 183/653] PEP 3132 tuple unpacking Co-authored-by: Corentin Cadiou --- yt/fields/tests/test_fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/tests/test_fields.py b/yt/fields/tests/test_fields.py index 11f56c9be72..55acd2aba4d 100644 --- a/yt/fields/tests/test_fields.py +++ b/yt/fields/tests/test_fields.py @@ -116,7 +116,7 @@ def __call__(self): def get_base_ds(nprocs): fields, units = [], [] - for fname, (code_units, _aliases, _dn) in StreamFieldInfo.known_other_fields: + for fname, (code_units, *_) in StreamFieldInfo.known_other_fields: fields.append(("gas", fname)) units.append(code_units) From a72c63d3c139b4c3a729c2b8012d23b113bd724f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 11:28:28 +0200 Subject: [PATCH 184/653] PEP 3132 tuple unpacking Co-authored-by: Corentin Cadiou --- yt/frontends/adaptahop/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/adaptahop/io.py b/yt/frontends/adaptahop/io.py index eccdfc0b19a..5f08c484cc8 100644 --- a/yt/frontends/adaptahop/io.py +++ b/yt/frontends/adaptahop/io.py @@ -67,7 +67,7 @@ def _read_particle_fields(self, chunks, ptf, selector): data_files.update(obj.data_files) def iterate_over_attributes(attr_list): - for attr, _length, _dtype in attr_list: + for attr, *_ in attr_list: if isinstance(attr, tuple): for a in attr: yield a From 32ac12db78162ddb8327f7840f7593d091933f73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 11:29:01 +0200 Subject: [PATCH 185/653] PEP 3132 tuple unpacking Co-authored-by: Corentin Cadiou --- yt/frontends/adaptahop/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/adaptahop/io.py b/yt/frontends/adaptahop/io.py index 5f08c484cc8..0cc834a5f92 100644 --- a/yt/frontends/adaptahop/io.py +++ b/yt/frontends/adaptahop/io.py @@ -273,7 +273,7 @@ def _todo_from_attributes(attributes): def _find_attr_position(key): j = 0 - for attrs, _l, _k in HALO_ATTRIBUTES: + for attrs, *_ in HALO_ATTRIBUTES: if not isinstance(attrs, tuple): attrs = (attrs,) for a in attrs: From 9f13cbe480b2b2d19665d19896d28fd3b4665620 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 11:29:46 +0200 Subject: [PATCH 186/653] PEP 3132 tuple unpacking Co-authored-by: Corentin Cadiou --- yt/data_objects/particle_trajectories.py | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 5a2db29cb6a..000878b94e5 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -111,9 +111,8 @@ def __init__( mylog.setLevel(old_level) times = [] - for _fn, (time, _indices, _pfields) in sorted(my_storage.items()): - times.append(time) - self.times = self.data_series[0].arr([time for time in times], times[0].units) + times = [time for _fn, (time, *_) in sorted(my_storage.items())] + self.times = self.data_series[0].arr(times, times[0].units) self.particle_fields = [] output_field = np.empty((self.num_indices, self.num_steps)) From 27645233721240e74dcee8d5b15292593dbc31ea Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 
11:34:33 +0200 Subject: [PATCH 187/653] avoid unnecessary loop Co-authored-by: Corentin Cadiou --- yt/frontends/art/data_structures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index cfae85145ec..bf6e34457d0 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -254,8 +254,8 @@ def _parse_parameter_file(self): # read the amr header with open(self._file_amr, "rb") as f: amr_header_vals = fpu.read_attrs(f, amr_header_struct, ">") - for _to_skip in ["tl", "dtl", "tlold", "dtlold", "iSO"]: - fpu.skip(f, endian=">") + n_to_skip = len(("tl", "dtl", "tlold", "dtlold", "iSO")) + fpu.skip(f, n_to_skip, endian=">") (self.ncell) = fpu.read_vector(f, "i", ">")[0] # Try to figure out the root grid dimensions est = int(np.rint(self.ncell ** (1.0 / 3.0))) From 94c5d4ced98948c94cc5d9b1495db4822eb414a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 11:44:01 +0200 Subject: [PATCH 188/653] avoid sorting twice the same object --- yt/data_objects/particle_trajectories.py | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 000878b94e5..8e600f948ce 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -110,17 +110,15 @@ def __init__( if self.suppress_logging: mylog.setLevel(old_level) - times = [] - times = [time for _fn, (time, *_) in sorted(my_storage.items())] + sorted_storage = sorted(my_storage.items()) + times = [time for _fn, (time, *_) in sorted_storage] self.times = self.data_series[0].arr(times, times[0].units) self.particle_fields = [] output_field = np.empty((self.num_indices, self.num_steps)) output_field.fill(np.nan) for field in ("particle_position_%s" % ax for ax in "xyz"): - for i, (_fn, (_time, indices, pfields)) in enumerate( - sorted(my_storage.items()) - ): + for i, (_fn, (_time, indices, pfields)) in enumerate(sorted_storage): try: # This will fail if particles ids are # duplicate. 
This is due to the fact that the rhs From 430e92a5b49e40cd1ff87795a827b3956dea757f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 11:51:31 +0200 Subject: [PATCH 189/653] PEP 3132 tuple unpacking Co-authored-by: Corentin Cadiou --- yt/frontends/open_pmd/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index 8ce016f1730..e8c3f5de420 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -250,7 +250,7 @@ def _count_grids(self): self.vpg = int(self.dataset.gridsize / 4) # 4Byte per value (f32) # Meshes of the same size do not need separate chunks - for (shape, _spacing, _offset, _unit_si) in set(self.meshshapes.values()): + for shape, *_ in set(self.meshshapes.values()): self.num_grids += min( shape[0], int(np.ceil(reduce(mul, shape) * self.vpg ** -1)) ) From 2394d6c168fd9858f13aae13413f445807f157e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 14:31:40 +0200 Subject: [PATCH 190/653] fix buggy test --- yt/data_objects/tests/test_sph_data_objects.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/tests/test_sph_data_objects.py b/yt/data_objects/tests/test_sph_data_objects.py index 1885bb5af4f..f8c191809f7 100644 --- a/yt/data_objects/tests/test_sph_data_objects.py +++ b/yt/data_objects/tests/test_sph_data_objects.py @@ -170,8 +170,8 @@ def test_ray(): def test_cutting(): ds = fake_sph_orientation_ds() for (normal, center), answer in CUTTING_ANSWERS.items(): - for _ in range(3): - cen = [c + 0.1 * c for c in center] + for i in range(-1, 2): + cen = [c + 0.1 * i for c in center] cut = ds.cutting(normal, cen) assert_equal(cut["gas", "density"].shape[0], answer) From 8008321a400836a017ab9b11aefa83ac821254e2 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 1 Jul 2020 14:39:35 +0100 Subject: [PATCH 191/653] Decrease code duplication in RAMSES frontend --- yt/frontends/ramses/data_structures.py | 2 +- yt/frontends/ramses/field_handlers.py | 90 ++++++++++------- yt/frontends/ramses/particle_handlers.py | 123 ++++------------------- 3 files changed, 71 insertions(+), 144 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a862424f114..3f716955220 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -66,7 +66,7 @@ def __init__(self, ds, domain_id): # Autodetect particle files particle_handlers = [ - PH(ds, self) for PH in get_particle_handlers() if PH.any_exist(ds) + PH(self) for PH in get_particle_handlers() if PH.any_exist(ds) ] self.particle_handlers = particle_handlers for ph in particle_handlers: diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index c076e3c585f..21bb3fdebf2 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -19,7 +19,6 @@ def register_field_handler(ph): FIELD_HANDLERS.add(ph) -PRESENT_FIELD_FILES = {} DETECTED_FIELDS = {} @@ -33,35 +32,13 @@ def __new__(meta, name, bases, class_dict): cls = type.__new__(meta, name, bases, class_dict) if cls.ftype is not None: register_field_handler(cls) - return cls - - -class FieldFileHandler(metaclass=RAMSESFieldFileHandlerRegistry): - """ - Abstract class to handle particles in RAMSES. Each instance - represents a single file (one domain). 
- - To add support to a new particle file, inherit from this class and - implement all functions containing a `NotImplementedError`. - - See `SinkParticleFileHandler` for an example implementation.""" - - # These properties are static properties - ftype = None # The name to give to the field type - fname = None # The name of the file(s) - attrs = None # The attributes of the header - known_fields = None # A list of tuple containing the field name and its type - config_field = None # Name of the config section (if any) - file_descriptor = None # The name of the file descriptor (if any) + cls._unique_registry = {} + return cls - # These properties are computed dynamically - field_offsets = None # Mapping from field to offset in file - field_types = ( - None # Mapping from field to the type of the data (float, integer, ...) - ) - def __init__(self, domain): +class HandlerMixin: + def setup_handler(self, domain): """ Initalize an instance of the class. This automatically sets the full path to the file. This is not intended to be @@ -70,9 +47,9 @@ def __init__(self, domain): If you need more flexibility, rewrite this function to your need in the inherited class. """ + self.ds = ds = domain.ds self.domain = domain self.domain_id = domain.domain_id - ds = domain.ds basename = os.path.abspath(ds.root_folder) iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) @@ -92,17 +69,23 @@ def __init__(self, domain): self.fname = full_path else: raise FileNotFoundError( - "Could not find fluid file (type: %s). Tried %s" - % (self.ftype, full_path) + "Could not find %s file (type: %s). Tried %s" + % (self._file_type, self.ftype, full_path) ) if self.file_descriptor is not None: - self.file_descriptor = os.path.join(basename, self.file_descriptor) + if ds.num_groups > 0: + # The particle file descriptor is *only* in the first group + self.file_descriptor = os.path.join( + basename, "group_00001", self.file_descriptor + ) + else: + self.file_descriptor = os.path.join(basename, self.file_descriptor) @property def exists(self): """ - This function should return True if the *file* for the domain + This function should return True if the *file* the instance exists. It is called for each file of the type found on the disk. @@ -112,7 +95,7 @@ def exists(self): return os.path.exists(self.fname) @property - def has_part_descriptor(self): + def has_descriptor(self): """ This function should return True if a *file descriptor* exists. @@ -125,9 +108,9 @@ def has_part_descriptor(self): @classmethod def any_exist(cls, ds): """ - This function should return True if the kind of field + This function should return True if the kind of particle represented by the class exists in the dataset. It takes as - argument the class itself - not an instance - and a dataset. + argument the class itself -not an instance- and a dataset. Arguments --------- @@ -139,8 +122,8 @@ def any_exist(cls, ds): the RAMSES Dataset structure to determine if the particle type (e.g. regular particles) exists. 
""" - if (ds.unique_identifier, cls.ftype) in PRESENT_FIELD_FILES: - return PRESENT_FIELD_FILES[(ds.unique_identifier, cls.ftype)] + if ds.unique_identifier in cls._unique_registry: + return cls._unique_registry[ds.unique_identifier] iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) @@ -148,10 +131,41 @@ def any_exist(cls, ds): os.path.split(ds.parameter_filename)[0], cls.fname.format(iout=iout, icpu=1) ) exists = os.path.exists(fname) - PRESENT_FIELD_FILES[(ds.unique_identifier, cls.ftype)] = exists + cls._unique_registry[ds.unique_identifier] = exists return exists + +class FieldFileHandler(HandlerMixin, metaclass=RAMSESFieldFileHandlerRegistry): + """ + Abstract class to handle particles in RAMSES. Each instance + represents a single file (one domain). + + To add support to a new particle file, inherit from this class and + implement all functions containing a `NotImplementedError`. + + See `SinkParticleFileHandler` for an example implementation.""" + + _file_type = "field" + + # These properties are static properties + ftype = None # The name to give to the field type + fname = None # The name of the file(s) + attrs = None # The attributes of the header + known_fields = None # A list of tuple containing the field name and its type + config_field = None # Name of the config section (if any) + + file_descriptor = None # The name of the file descriptor (if any) + + # These properties are computed dynamically + field_offsets = None # Mapping from field to offset in file + field_types = ( + None # Mapping from field to the type of the data (float, integer, ...) + ) + + def __init__(self, domain): + self.setup_handler(domain) + @classmethod def detect_fields(cls, ds): """ diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 340b6a413bc..3b13d2ed85b 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -5,9 +5,9 @@ from yt.utilities.cython_fortran_utils import FortranFile from .io import _read_part_file_descriptor +from .field_handlers import HandlerMixin PARTICLE_HANDLERS = set() -PRESENT_PART_FILES = {} def get_particle_handlers(): @@ -28,18 +28,21 @@ def __new__(meta, name, bases, class_dict): cls = type.__new__(meta, name, bases, class_dict) if cls.ptype is not None: register_particle_handler(cls) - return cls + cls._unique_registry = {} + return cls -class ParticleFileHandler(metaclass=RAMSESParticleFileHandlerRegistry): - """ +class ParticleFileHandler(HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry): + ''' Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). To add support to a new particle file, inherit from this class and implement all functions containing a `NotImplementedError`. - See `SinkParticleFileHandler` for an example implementation.""" + See `SinkParticleFileHandler` for an example implementation.''' + + _file_type = 'particle' # These properties are static properties ptype = None # The name to give to the particle type @@ -57,49 +60,8 @@ class ParticleFileHandler(metaclass=RAMSESParticleFileHandlerRegistry): ) local_particle_count = None # The number of particle in the domain - def __init__(self, ds, domain): - """ - Initalize an instance of the class. This automatically sets - the full path to the file. This is not intended to be - overriden in most cases. - - If you need more flexibility, rewrite this function to your - need in the inherited class. 
- """ - self.ds = ds - self.domain = domain - self.domain_id = domain.domain_id - basename = os.path.abspath(ds.root_folder) - iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) - - if ds.num_groups > 0: - igroup = ((domain.domain_id - 1) // ds.group_size) + 1 - full_path = os.path.join( - basename, - "group_{:05d}".format(igroup), - self.fname.format(iout=iout, icpu=domain.domain_id), - ) - else: - full_path = os.path.join( - basename, self.fname.format(iout=iout, icpu=domain.domain_id) - ) - - if os.path.exists(full_path): - self.fname = full_path - else: - raise FileNotFoundError( - "Could not find particle file (type: %s). Tried %s" - % (self.ptype, full_path) - ) - - if self.file_descriptor is not None: - if ds.num_groups > 0: - # The particle file descriptor is *only* in the first group - self.file_descriptor = os.path.join( - basename, "group_00001", self.file_descriptor - ) - else: - self.file_descriptor = os.path.join(basename, self.file_descriptor) + def __init__(self, domain): + self.setup_handler(domain) # Attempt to read the list of fields from the config file if self.config_field and ytcfg.has_section(self.config_field): @@ -110,59 +72,6 @@ def __init__(self, ds, domain): known_fields.append((field, field_type)) self.known_fields = known_fields - @property - def exists(self): - """ - This function should return True if the *file* the instance - exists. It is called for each file of the type found on the - disk. - - By default, it just returns whether the file exists. Override - it for more complex cases. - """ - return os.path.exists(self.fname) - - @property - def has_part_descriptor(self): - """ - This function should return True if a *file descriptor* - exists. - - By default, it just returns whether the file exists. Override - it for more complex cases. - """ - return os.path.exists(self.file_descriptor) - - @classmethod - def any_exist(cls, ds): - """ - This function should return True if the kind of particle - represented by the class exists in the dataset. It takes as - argument the class itself -not an instance- and a dataset. - - Arguments - --------- - * ds: a Ramses Dataset - - Note - ---- - This function is usually called once at the initialization of - the RAMSES Dataset structure to determine if the particle type - (e.g. regular particles) exists. - """ - if (ds.unique_identifier, cls.ptype) in PRESENT_PART_FILES: - return PRESENT_PART_FILES[(ds.unique_identifier, cls.ptype)] - - iout = int(os.path.basename(ds.parameter_filename).split(".")[0].split("_")[1]) - - fname = os.path.join( - os.path.split(ds.parameter_filename)[0], cls.fname.format(iout=iout, icpu=1) - ) - exists = os.path.exists(fname) - PRESENT_PART_FILES[(ds.unique_identifier, cls.ptype)] = exists - - return exists - def read_header(self): """ This function is called once per file. 
It should: @@ -231,8 +140,10 @@ def read_header(self): self.local_particle_count = hvals["npart"] extra_particle_fields = self.ds._extra_particle_fields - if self.has_part_descriptor: - particle_fields = _read_part_file_descriptor(self.file_descriptor) + if self.has_descriptor: + particle_fields = ( + _read_part_file_descriptor(self.file_descriptor) + ) else: particle_fields = list(self.known_fields) @@ -345,8 +256,10 @@ def read_header(self): self.local_particle_count = hvals["nsink"] # Read the fields + add the sink properties - if self.has_part_descriptor: - fields = _read_part_file_descriptor(self.file_descriptor) + if self.has_descriptor: + fields = ( + _read_part_file_descriptor(self.file_descriptor) + ) else: fields = list(self.known_fields) From d58f681d81fc0c527b2faa8db26941ddf4cf4849 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 14:39:18 +0200 Subject: [PATCH 192/653] Black pass --- yt/frontends/ramses/particle_handlers.py | 15 ++++++--------- 1 file changed, 6 insertions(+), 9 deletions(-) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 3b13d2ed85b..954d4c227c7 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -32,17 +32,18 @@ def __new__(meta, name, bases, class_dict): cls._unique_registry = {} return cls + class ParticleFileHandler(HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry): - ''' + """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). To add support to a new particle file, inherit from this class and implement all functions containing a `NotImplementedError`. - See `SinkParticleFileHandler` for an example implementation.''' + See `SinkParticleFileHandler` for an example implementation.""" - _file_type = 'particle' + _file_type = "particle" # These properties are static properties ptype = None # The name to give to the particle type @@ -141,9 +142,7 @@ def read_header(self): extra_particle_fields = self.ds._extra_particle_fields if self.has_descriptor: - particle_fields = ( - _read_part_file_descriptor(self.file_descriptor) - ) + particle_fields = _read_part_file_descriptor(self.file_descriptor) else: particle_fields = list(self.known_fields) @@ -257,9 +256,7 @@ def read_header(self): # Read the fields + add the sink properties if self.has_descriptor: - fields = ( - _read_part_file_descriptor(self.file_descriptor) - ) + fields = _read_part_file_descriptor(self.file_descriptor) else: fields = list(self.known_fields) From 34c9056fc8103e11bb3712c5d237967ae5ce36fe Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 20:15:20 +0200 Subject: [PATCH 193/653] fix a bug where non-cartesian coordinate fields would not be correctly added to the save list --- yt/data_objects/data_containers.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 0ea775ff514..94181e98975 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -664,7 +664,7 @@ def save_as_dataset(self, filename=None, fields=None): need_grid_positions = False if need_particle_positions: - for ax in "xyz": + for ax in self.ds.coordinates.axis_order: for ptype in ptypes: p_field = (ptype, "particle_position_%s" % ax) if p_field in self.ds.field_info and p_field not in data: @@ -672,7 +672,7 @@ def save_as_dataset(self, filename=None, fields=None): ftypes[p_field] = 
p_field[0] data[p_field] = self[p_field] if need_grid_positions: - for ax in "xyz": + for ax in self.ds.coordinates.axis_order: g_field = ("index", ax) if g_field in self.ds.field_info and g_field not in data: data_fields.append(g_field) From 2d95e7d6bd9a27c5ab0bec23bc04992db0ac7d5b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 20:49:07 +0200 Subject: [PATCH 194/653] fix a bug where the geometry attribute was not saved through save_as_dataset --- .../tests/test_io_non_cartesian_data.py | 14 ++++++++++++++ yt/frontends/ytdata/data_structures.py | 1 + yt/frontends/ytdata/utilities.py | 1 + 3 files changed, 16 insertions(+) create mode 100644 yt/data_objects/tests/test_io_non_cartesian_data.py diff --git a/yt/data_objects/tests/test_io_non_cartesian_data.py b/yt/data_objects/tests/test_io_non_cartesian_data.py new file mode 100644 index 00000000000..26b88647233 --- /dev/null +++ b/yt/data_objects/tests/test_io_non_cartesian_data.py @@ -0,0 +1,14 @@ +from tempfile import NamedTemporaryFile + +from yt.frontends.ytdata.data_structures import YTDataContainerDataset +from yt.testing import fake_amr_ds + + +def test_preserve_geometry(): + for geom in ("cartesian", "cylindrical", "spherical"): + ds1 = fake_amr_ds(fields=[("gas", "density")], geometry=geom) + ad = ds1.all_data() + with NamedTemporaryFile(suffix=".h5") as tmpf: + fn = ad.save_as_dataset(tmpf.name, fields=["density"]) + ds2 = YTDataContainerDataset(fn) + assert ds1.geometry == ds2.geometry == geom diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index 5a6c41f6e36..a7af8dbdcf5 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -164,6 +164,7 @@ class YTDataset(SavedDataset): "omega_lambda", "dimensionality", "domain_dimensions", + "geometry", "periodicity", "domain_left_edge", "domain_right_edge", diff --git a/yt/frontends/ytdata/utilities.py b/yt/frontends/ytdata/utilities.py index 560dbf97a6c..9551c9a0602 100644 --- a/yt/frontends/ytdata/utilities.py +++ b/yt/frontends/ytdata/utilities.py @@ -76,6 +76,7 @@ def save_as_dataset(ds, filename, data, field_types=None, extra_attrs=None): "current_redshift", "current_time", "domain_dimensions", + "geometry", "periodicity", "cosmological_simulation", "omega_lambda", From 7945d960172d8215b53ab26f8c74f18f18646f6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 21:10:38 +0200 Subject: [PATCH 195/653] add test for grid preservation --- yt/data_objects/tests/test_io_non_cartesian_data.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_io_non_cartesian_data.py b/yt/data_objects/tests/test_io_non_cartesian_data.py index 26b88647233..30a8f9424ac 100644 --- a/yt/data_objects/tests/test_io_non_cartesian_data.py +++ b/yt/data_objects/tests/test_io_non_cartesian_data.py @@ -4,11 +4,16 @@ from yt.testing import fake_amr_ds -def test_preserve_geometry(): +def test_preserve_geometric_properties(): for geom in ("cartesian", "cylindrical", "spherical"): ds1 = fake_amr_ds(fields=[("gas", "density")], geometry=geom) ad = ds1.all_data() with NamedTemporaryFile(suffix=".h5") as tmpf: fn = ad.save_as_dataset(tmpf.name, fields=["density"]) ds2 = YTDataContainerDataset(fn) + dfl = ds2.derived_field_list assert ds1.geometry == ds2.geometry == geom + + expected = set(ds1.coordinates.axis_order) + actual = set([fname for ftype, fname in dfl]) + assert expected.difference(actual) == set() From 
6f325ec118c2a3aa4cfb5a67140186c076d63bb8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 22:48:20 +0200 Subject: [PATCH 196/653] add missing decorator in test --- yt/data_objects/tests/test_io_non_cartesian_data.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_io_non_cartesian_data.py b/yt/data_objects/tests/test_io_non_cartesian_data.py index 30a8f9424ac..f3eb96ef201 100644 --- a/yt/data_objects/tests/test_io_non_cartesian_data.py +++ b/yt/data_objects/tests/test_io_non_cartesian_data.py @@ -1,9 +1,10 @@ from tempfile import NamedTemporaryFile from yt.frontends.ytdata.data_structures import YTDataContainerDataset -from yt.testing import fake_amr_ds +from yt.testing import fake_amr_ds, requires_module +@requires_module("h5py") def test_preserve_geometric_properties(): for geom in ("cartesian", "cylindrical", "spherical"): ds1 = fake_amr_ds(fields=[("gas", "density")], geometry=geom) From 8ead7c5b4825d43ca0dcd6741d2e5a3cae549596 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 3 Aug 2020 23:05:56 +0200 Subject: [PATCH 197/653] fix implicit reading cartesian geometry (backward compatibility) --- yt/frontends/ytdata/data_structures.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index a7af8dbdcf5..57bd7c1673a 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -106,6 +106,9 @@ def _parse_parameter_file(self): for attr in self._con_attrs: setattr(self, attr, self.parameters.get(attr)) + if self.geometry is None: + self.geometry = "cartesian" + def _with_parameter_file_open(self, f): # This allows subclasses to access the parameter file # while it's still open to get additional information. 
From c2ec16efb565af264c8100281d1544f8a7e4ea44 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 00:34:03 +0200 Subject: [PATCH 198/653] create tempdir to avoid permission denial on appveyor --- yt/data_objects/tests/test_io_non_cartesian_data.py | 8 +++++--- 1 file changed, 5 insertions(+), 3 deletions(-) diff --git a/yt/data_objects/tests/test_io_non_cartesian_data.py b/yt/data_objects/tests/test_io_non_cartesian_data.py index f3eb96ef201..8e0257ffecd 100644 --- a/yt/data_objects/tests/test_io_non_cartesian_data.py +++ b/yt/data_objects/tests/test_io_non_cartesian_data.py @@ -1,4 +1,5 @@ -from tempfile import NamedTemporaryFile +import os +from tempfile import TemporaryDirectory from yt.frontends.ytdata.data_structures import YTDataContainerDataset from yt.testing import fake_amr_ds, requires_module @@ -9,8 +10,9 @@ def test_preserve_geometric_properties(): for geom in ("cartesian", "cylindrical", "spherical"): ds1 = fake_amr_ds(fields=[("gas", "density")], geometry=geom) ad = ds1.all_data() - with NamedTemporaryFile(suffix=".h5") as tmpf: - fn = ad.save_as_dataset(tmpf.name, fields=["density"]) + with TemporaryDirectory() as tmpdir: + tmpf = os.path.join(tmpdir, "savefile.h5") + fn = ad.save_as_dataset(tmpf, fields=["density"]) ds2 = YTDataContainerDataset(fn) dfl = ds2.derived_field_list assert ds1.geometry == ds2.geometry == geom From ccd1129ef63386fd5322a34259f083fb577d0c58 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 07:10:59 +0200 Subject: [PATCH 199/653] add test for implicit cartesian geometry reading --- .../tests/test_io_non_cartesian_data.py | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/yt/data_objects/tests/test_io_non_cartesian_data.py b/yt/data_objects/tests/test_io_non_cartesian_data.py index 8e0257ffecd..c13439bd31a 100644 --- a/yt/data_objects/tests/test_io_non_cartesian_data.py +++ b/yt/data_objects/tests/test_io_non_cartesian_data.py @@ -1,8 +1,12 @@ import os from tempfile import TemporaryDirectory +import numpy as np + +from yt.frontends.ytdata.api import save_as_dataset from yt.frontends.ytdata.data_structures import YTDataContainerDataset from yt.testing import fake_amr_ds, requires_module +from yt.units import YTQuantity @requires_module("h5py") @@ -20,3 +24,14 @@ def test_preserve_geometric_properties(): expected = set(ds1.coordinates.axis_order) actual = set([fname for ftype, fname in dfl]) assert expected.difference(actual) == set() + + +@requires_module("h5py") +def test_default_to_cartesian(): + data = {"density": np.random.random(128)} + ds_attrs = {"current_time": YTQuantity(10, "Myr")} + with TemporaryDirectory() as tmpdir: + tmpf = os.path.join(tmpdir, "savefile.h5") + fn = save_as_dataset(ds_attrs, tmpf, data) + ds2 = YTDataContainerDataset(fn) + assert ds2.geometry == "cartesian" From 7da23bd2fd7ad4651079a72c5575f5bc63e8448c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 07:11:41 +0200 Subject: [PATCH 200/653] fix a missing return statement in yt.save_as_dataset --- yt/frontends/ytdata/utilities.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/frontends/ytdata/utilities.py b/yt/frontends/ytdata/utilities.py index 9551c9a0602..4b4853cc5d6 100644 --- a/yt/frontends/ytdata/utilities.py +++ b/yt/frontends/ytdata/utilities.py @@ -139,6 +139,7 @@ def save_as_dataset(ds, filename, data, field_types=None, extra_attrs=None): if "num_elements" not in fh[field_type].attrs: fh[field_type].attrs["num_elements"] = 
data[field].size fh.close() + return filename def _hdf5_yt_array(fh, field, ds=None): From dcf6b79ffcfdbe28764f903f18ced16ea2a9bc11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 07:15:44 +0200 Subject: [PATCH 201/653] rename test file --- .../tests/{test_io_non_cartesian_data.py => test_io_geometry.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename yt/data_objects/tests/{test_io_non_cartesian_data.py => test_io_geometry.py} (100%) diff --git a/yt/data_objects/tests/test_io_non_cartesian_data.py b/yt/data_objects/tests/test_io_geometry.py similarity index 100% rename from yt/data_objects/tests/test_io_non_cartesian_data.py rename to yt/data_objects/tests/test_io_geometry.py From d7cad9232568c20a5abcd79af4e8e996ed35e10b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 07:33:08 +0200 Subject: [PATCH 202/653] use load instead of class init --- yt/data_objects/tests/test_io_geometry.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/tests/test_io_geometry.py b/yt/data_objects/tests/test_io_geometry.py index c13439bd31a..4716276ab70 100644 --- a/yt/data_objects/tests/test_io_geometry.py +++ b/yt/data_objects/tests/test_io_geometry.py @@ -3,6 +3,7 @@ import numpy as np +from yt.convenience import load from yt.frontends.ytdata.api import save_as_dataset from yt.frontends.ytdata.data_structures import YTDataContainerDataset from yt.testing import fake_amr_ds, requires_module @@ -17,7 +18,8 @@ def test_preserve_geometric_properties(): with TemporaryDirectory() as tmpdir: tmpf = os.path.join(tmpdir, "savefile.h5") fn = ad.save_as_dataset(tmpf, fields=["density"]) - ds2 = YTDataContainerDataset(fn) + ds2 = load(fn) + assert isinstance(ds2, YTDataContainerDataset) dfl = ds2.derived_field_list assert ds1.geometry == ds2.geometry == geom @@ -33,5 +35,5 @@ def test_default_to_cartesian(): with TemporaryDirectory() as tmpdir: tmpf = os.path.join(tmpdir, "savefile.h5") fn = save_as_dataset(ds_attrs, tmpf, data) - ds2 = YTDataContainerDataset(fn) + ds2 = load(fn) assert ds2.geometry == "cartesian" From 44cd48cafe0f6f7067b148eff092aece330007b3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 09:40:47 +0200 Subject: [PATCH 203/653] exp: refactor boxlib dataformat validators (simplify and code deduplication) --- yt/frontends/boxlib/data_structures.py | 109 ++++++++----------------- 1 file changed, 32 insertions(+), 77 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 6d92c98119f..8d5c0eed779 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -675,26 +675,10 @@ def _localize_check(self, fn): def _is_valid(cls, *args, **kwargs): # fill our args output_dir = args[0] - # boxlib datasets are always directories - if not os.path.isdir(output_dir): - return False header_filename = os.path.join(output_dir, "Header") - jobinfo_filename = os.path.join(output_dir, "job_info") - if not os.path.exists(header_filename): - # We *know* it's not boxlib if Header doesn't exist. 
- return False - args = inspect.getcallargs(cls.__init__, args, kwargs) - # This might need to be localized somehow - if args["cparam_filename"] is None: - return True # Treat as generic boxlib data - inputs_filename = os.path.join( - os.path.dirname(os.path.abspath(output_dir)), args["cparam_filename"] - ) - if not os.path.exists(inputs_filename) and not os.path.exists(jobinfo_filename): - return True # We have no parameters to go off of - # If we do have either inputs or jobinfo, we should be deferring to a - # different frontend. - return False + # boxlib datasets are always directories, and + # We *know* it's not boxlib if Header doesn't exist. + return os.path.exists(header_filename) def _parse_parameter_file(self): """ @@ -1041,24 +1025,16 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - # fill our args - output_dir = args[0] - # boxlib datasets are always directories - if not os.path.isdir(output_dir): + if not super(OrionDataset, cls)._is_valid(*args, **kwargs): return False - header_filename = os.path.join(output_dir, "Header") + output_dir = args[0] jobinfo_filename = os.path.join(output_dir, "job_info") - if not os.path.exists(header_filename): - # We *know* it's not boxlib if Header doesn't exist. - return False + args = inspect.getcallargs(cls.__init__, args, kwargs) - # This might need to be localized somehow inputs_filename = os.path.join( os.path.dirname(os.path.abspath(output_dir)), args["cparam_filename"] ) - if not os.path.exists(inputs_filename): - return False - if os.path.exists(jobinfo_filename): + if not os.path.exists(inputs_filename) or os.path.exists(jobinfo_filename): return False # Now we check for all the others warpx_jobinfo_filename = os.path.join(output_dir, "warpx_job_info") @@ -1127,23 +1103,18 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - # fill our args - output_dir = args[0] - # boxlib datasets are always directories - if not os.path.isdir(output_dir): + if not super(CastroDataset, cls)._is_valid(*args, **kwargs): return False - header_filename = os.path.join(output_dir, "Header") + + output_dir = args[0] jobinfo_filename = os.path.join(output_dir, "job_info") - if not os.path.exists(header_filename): - # We *know* it's not boxlib if Header doesn't exist. - return False + if not os.path.exists(jobinfo_filename): return False + # Now we check for all the others lines = open(jobinfo_filename).readlines() - if any(line.startswith("Castro ") for line in lines): - return True - return False + return any(line.startswith("Castro ") for line in lines) def _parse_parameter_file(self): super(CastroDataset, self)._parse_parameter_file() @@ -1222,23 +1193,18 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - # fill our args - output_dir = args[0] - # boxlib datasets are always directories - if not os.path.isdir(output_dir): + if not super(MaestroDataset, cls)._is_valid(*args, **kwargs): return False - header_filename = os.path.join(output_dir, "Header") + + output_dir = args[0] jobinfo_filename = os.path.join(output_dir, "job_info") - if not os.path.exists(header_filename): - # We *know* it's not boxlib if Header doesn't exist. 
- return False + if not os.path.exists(jobinfo_filename): return False + # Now we check the job_info for the mention of maestro lines = open(jobinfo_filename).readlines() - if any(line.startswith("MAESTRO ") for line in lines): - return True - return False + return any(line.startswith("MAESTRO ") for line in lines) def _parse_parameter_file(self): super(MaestroDataset, self)._parse_parameter_file() @@ -1327,25 +1293,20 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - # fill our args - output_dir = args[0] - # boxlib datasets are always directories - if not os.path.isdir(output_dir): + if not super(NyxDataset, cls)._is_valid(*args, **kwargs): return False - header_filename = os.path.join(output_dir, "Header") + + output_dir = args[0] jobinfo_filename = os.path.join(output_dir, "job_info") - if not os.path.exists(header_filename): - # We *know* it's not boxlib if Header doesn't exist. - return False + if not os.path.exists(jobinfo_filename): return False + # Now we check the job_info for the mention of maestro lines = open(jobinfo_filename).readlines() - if any(line.startswith("Nyx ") for line in lines): - return True - if any(line.startswith("nyx.") for line in lines): - return True - return False + return any(line.startswith("Nyx ") for line in lines) or any( + line.startswith("nyx.") for line in lines + ) def _parse_parameter_file(self): super(NyxDataset, self)._parse_parameter_file() @@ -1621,19 +1582,13 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - # fill our args - output_dir = args[0] - # boxlib datasets are always directories - if not os.path.isdir(output_dir): + if not super(WarpXDataset, cls)._is_valid(*args, **kwargs): return False - header_filename = os.path.join(output_dir, "Header") + + output_dir = args[0] jobinfo_filename = os.path.join(output_dir, "warpx_job_info") - if not os.path.exists(header_filename): - # We *know* it's not boxlib if Header doesn't exist. 
- return False - if os.path.exists(jobinfo_filename): - return True - return False + + return os.path.exists(jobinfo_filename) def _parse_parameter_file(self): super(WarpXDataset, self)._parse_parameter_file() From 023b72aa897694fe5e953bede167679f6d91a5e2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 11:52:40 +0200 Subject: [PATCH 204/653] fix a bug where some castro datasets were not recognized (including castro_sod_x_plt00036 in the sample database) --- yt/frontends/boxlib/data_structures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 8d5c0eed779..ae1b5d13734 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1113,8 +1113,8 @@ def _is_valid(cls, *args, **kwargs): return False # Now we check for all the others - lines = open(jobinfo_filename).readlines() - return any(line.startswith("Castro ") for line in lines) + lines = [line.lower() for line in open(jobinfo_filename).readlines()] + return any(line.startswith("castro") for line in lines) def _parse_parameter_file(self): super(CastroDataset, self)._parse_parameter_file() From 91ce97338b50f3753f5596a28265f8b168017837 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 12:05:34 +0200 Subject: [PATCH 205/653] refactor: generalize the validation of boxlib subtypes --- yt/frontends/boxlib/data_structures.py | 100 +++++++------------------ 1 file changed, 29 insertions(+), 71 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index ae1b5d13734..103c0267413 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1,5 +1,4 @@ import glob -import inspect import os import re from collections import namedtuple @@ -680,6 +679,20 @@ def _is_valid(cls, *args, **kwargs): # We *know* it's not boxlib if Header doesn't exist. return os.path.exists(header_filename) + @classmethod + def _is_valid_subtype(cls, output_dir): + # this is used by derived classes + if not BoxlibDataset._is_valid(output_dir): + return False + + jobinfo_filename = os.path.join(output_dir, cls._job_info_basename) + + if not os.path.exists(jobinfo_filename): + return False + + lines = [line.lower() for line in open(jobinfo_filename).readlines()] + return any(line.startswith(cls._subtype_keyword) for line in lines) + def _parse_parameter_file(self): """ Parses the parameter file and establishes the various @@ -1001,6 +1014,8 @@ def _read_particle_file(self, fn): class OrionDataset(BoxlibDataset): _index_class = OrionHierarchy + _subtype_keyword = "hyp." + _job_info_basename = "job_info" def __init__( self, @@ -1025,31 +1040,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - if not super(OrionDataset, cls)._is_valid(*args, **kwargs): - return False - output_dir = args[0] - jobinfo_filename = os.path.join(output_dir, "job_info") - - args = inspect.getcallargs(cls.__init__, args, kwargs) - inputs_filename = os.path.join( - os.path.dirname(os.path.abspath(output_dir)), args["cparam_filename"] - ) - if not os.path.exists(inputs_filename) or os.path.exists(jobinfo_filename): - return False - # Now we check for all the others - warpx_jobinfo_filename = os.path.join(output_dir, "warpx_job_info") - if os.path.exists(warpx_jobinfo_filename): - return False - lines = open(inputs_filename).readlines() - if any(("castro." 
in line for line in lines)): - return False - if any(("nyx." in line for line in lines)): - return False - if any(("maestro" in line.lower() for line in lines)): - return False - if any(("hyp." in line for line in lines)): - return True - return False + return cls._is_valid_subtype(args[0]) class CastroHierarchy(BoxlibHierarchy): @@ -1079,6 +1070,8 @@ class CastroDataset(BoxlibDataset): _index_class = CastroHierarchy _field_info_class = CastroFieldInfo + _subtype_keyword = "castro" + _job_info_basename = "job_info" def __init__( self, @@ -1103,18 +1096,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - if not super(CastroDataset, cls)._is_valid(*args, **kwargs): - return False - - output_dir = args[0] - jobinfo_filename = os.path.join(output_dir, "job_info") - - if not os.path.exists(jobinfo_filename): - return False - - # Now we check for all the others - lines = [line.lower() for line in open(jobinfo_filename).readlines()] - return any(line.startswith("castro") for line in lines) + return cls._is_valid_subtype(args[0]) def _parse_parameter_file(self): super(CastroDataset, self)._parse_parameter_file() @@ -1169,6 +1151,8 @@ def _parse_parameter_file(self): class MaestroDataset(BoxlibDataset): _field_info_class = MaestroFieldInfo + _subtype_keyword = "maestro" + _job_info_basename = "job_info" def __init__( self, @@ -1193,18 +1177,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - if not super(MaestroDataset, cls)._is_valid(*args, **kwargs): - return False - - output_dir = args[0] - jobinfo_filename = os.path.join(output_dir, "job_info") - - if not os.path.exists(jobinfo_filename): - return False - - # Now we check the job_info for the mention of maestro - lines = open(jobinfo_filename).readlines() - return any(line.startswith("MAESTRO ") for line in lines) + return cls._is_valid_subtype(args[0]) def _parse_parameter_file(self): super(MaestroDataset, self)._parse_parameter_file() @@ -1269,6 +1242,8 @@ class NyxDataset(BoxlibDataset): _index_class = NyxHierarchy _field_info_class = NyxFieldInfo + _subtype_keyword = "nyx" + _job_info_basename = "job_info" def __init__( self, @@ -1293,20 +1268,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - if not super(NyxDataset, cls)._is_valid(*args, **kwargs): - return False - - output_dir = args[0] - jobinfo_filename = os.path.join(output_dir, "job_info") - - if not os.path.exists(jobinfo_filename): - return False - - # Now we check the job_info for the mention of maestro - lines = open(jobinfo_filename).readlines() - return any(line.startswith("Nyx ") for line in lines) or any( - line.startswith("nyx.") for line in lines - ) + return cls._is_valid_subtype(args[0]) def _parse_parameter_file(self): super(NyxDataset, self)._parse_parameter_file() @@ -1554,6 +1516,8 @@ class WarpXDataset(BoxlibDataset): _index_class = WarpXHierarchy _field_info_class = WarpXFieldInfo + _subtype_keyword = "warpx" + _job_info_basename = "warpx_job_info" def __init__( self, @@ -1582,13 +1546,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - if not super(WarpXDataset, cls)._is_valid(*args, **kwargs): - return False - - output_dir = args[0] - jobinfo_filename = os.path.join(output_dir, "warpx_job_info") - - return os.path.exists(jobinfo_filename) + return cls._is_valid_subtype(args[0]) def _parse_parameter_file(self): super(WarpXDataset, self)._parse_parameter_file() From 48e4e9986f8f797a2ad99b46e6a04293f315702b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 
2020 12:20:08 +0200 Subject: [PATCH 206/653] partly fix a bug where parsing a Nyx dataset without cosmology info would break with StopIteration --- yt/frontends/boxlib/data_structures.py | 28 ++++++++++++++++---------- 1 file changed, 17 insertions(+), 11 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 103c0267413..d5ca7454003 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1277,24 +1277,30 @@ def _parse_parameter_file(self): self.cosmological_simulation = 1 jobinfo_filename = os.path.join(self.output_dir, "job_info") - line = "" + + has_cosmo_info = False with open(jobinfo_filename, "r") as f: - while not line.startswith(" Cosmology Information"): + for line in f: # get the code git hashes if "git hash" in line: # line format: codename git hash: the-hash fields = line.split(":") self.parameters[fields[0]] = fields[1].strip() - line = next(f) - # get the cosmology - for line in f: - if "Omega_m (comoving)" in line: - self.omega_matter = float(line.split(":")[1]) - elif "Omega_lambda (comoving)" in line: - self.omega_lambda = float(line.split(":")[1]) - elif "h (comoving)" in line: - self.hubble_constant = float(line.split(":")[1]) + if line.startswith(" Cosmology Information"): + has_cosmo_info = True + break + + if has_cosmo_info: + for line in f: + if "Omega_m (comoving)" in line: + self.omega_matter = float(line.split(":")[1]) + elif "Omega_lambda (comoving)" in line: + self.omega_lambda = float(line.split(":")[1]) + elif "h (comoving)" in line: + self.hubble_constant = float(line.split(":")[1]) + else: + raise NotImplementedError # Read in the `comoving_a` file and parse the value. We should fix this # in the new Nyx output format... 
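The rewrite above scans job_info with a single for loop, so a file that has no " Cosmology Information" section simply falls through instead of raising StopIteration from next(f). A minimal, self-contained sketch of that pattern follows; the sample job_info text and the parse_job_info helper are invented for illustration and are not part of yt:

    import io

    SAMPLE_JOB_INFO = """\
     Nyx git hash: abc1234
     Cosmology Information
     Omega_m (comoving): 0.275
     Omega_lambda (comoving): 0.725
     h (comoving): 0.702
    """

    def parse_job_info(f):
        # First pass: collect git hashes while looking for the section header.
        # A plain for loop just runs out of lines if the header is absent,
        # whereas the old while/next(f) pattern raised StopIteration.
        params, cosmo = {}, {}
        has_cosmo_info = False
        for line in f:
            if "git hash" in line:
                key, _, value = line.partition(":")
                params[key.strip()] = value.strip()
            if line.strip().startswith("Cosmology Information"):
                has_cosmo_info = True
                break
        if has_cosmo_info:
            # The file iterator resumes right after the section header.
            for line in f:
                key, _, value = line.partition(":")
                cosmo[key.strip()] = float(value)
        return params, cosmo

    print(parse_job_info(io.StringIO(SAMPLE_JOB_INFO)))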
From 87da580cc7ae156d2687fc3e005ff18c208aea96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 15:50:27 +0200 Subject: [PATCH 207/653] add tests --- yt/frontends/boxlib/tests/test_outputs.py | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/yt/frontends/boxlib/tests/test_outputs.py b/yt/frontends/boxlib/tests/test_outputs.py index 289d07f0203..6a3e0e9418c 100644 --- a/yt/frontends/boxlib/tests/test_outputs.py +++ b/yt/frontends/boxlib/tests/test_outputs.py @@ -255,14 +255,13 @@ def test_warpx_particle_io(): _raw_fields = [("raw", "Bx"), ("raw", "Ey"), ("raw", "jz")] -raw_fields = "Laser/plt00015" +laser = "Laser/plt00015" -@requires_ds(raw_fields) +@requires_ds(laser) def test_raw_fields(): - ds_fn = raw_fields for field in _raw_fields: - yield GridValuesTest(ds_fn, field) + yield GridValuesTest(laser, field) @requires_file(rt) @@ -275,16 +274,31 @@ def test_NyxDataset(): assert isinstance(data_dir_load(LyA), NyxDataset) +@requires_file("nyx_small") +def test_NyxDataset_2(): + assert isinstance(data_dir_load("nyx_small"), NyxDataset) + + @requires_file(RT_particles) def test_CastroDataset(): assert isinstance(data_dir_load(RT_particles), CastroDataset) +@requires_file("castro_sod_x_plt00036") +def test_CastroDataset_2(): + assert isinstance(data_dir_load("castro_sod_x_plt00036"), CastroDataset) + + @requires_file(LyA) def test_WarpXDataset(): assert isinstance(data_dir_load(plasma), WarpXDataset) +@requires_ds(laser) +def test_WarpXDataset_2(): + assert isinstance(data_dir_load(laser), WarpXDataset) + + @requires_file(rt) def test_units_override(): units_override_check(rt) From 87c3e4649890a76eaaef5a7ea37a266633d2749e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 16:55:27 +0200 Subject: [PATCH 208/653] a more general lookup algorithm that should work with Orion and WarpX --- yt/frontends/boxlib/data_structures.py | 45 ++++++++++++++------------ 1 file changed, 24 insertions(+), 21 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index d5ca7454003..82254223c32 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1,4 +1,5 @@ import glob +import inspect import os import re from collections import namedtuple @@ -680,18 +681,25 @@ def _is_valid(cls, *args, **kwargs): return os.path.exists(header_filename) @classmethod - def _is_valid_subtype(cls, output_dir): + def _is_valid_subtype(cls, *args, **kwargs): # this is used by derived classes + output_dir = args[0] + if not BoxlibDataset._is_valid(output_dir): return False - jobinfo_filename = os.path.join(output_dir, cls._job_info_basename) - - if not os.path.exists(jobinfo_filename): + iargs = inspect.getcallargs(cls.__init__, args, kwargs) + lookup_table = [ + os.path.abspath(os.path.join(p, iargs["cparam_filename"])) + for p in (output_dir, os.path.dirname(output_dir)) + ] + found = [os.path.exists(file) for file in lookup_table] + if not any(found): return False - lines = [line.lower() for line in open(jobinfo_filename).readlines()] - return any(line.startswith(cls._subtype_keyword) for line in lines) + cparam_filepath = lookup_table[found.index(True)] + lines = [line.lower() for line in open(cparam_filepath).readlines()] + return any(cls._subtype_keyword in line for line in lines) def _parse_parameter_file(self): """ @@ -1015,7 +1023,6 @@ class OrionDataset(BoxlibDataset): _index_class = OrionHierarchy _subtype_keyword = 
"hyp." - _job_info_basename = "job_info" def __init__( self, @@ -1040,7 +1047,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - return cls._is_valid_subtype(args[0]) + return cls._is_valid_subtype(*args, **kwargs) class CastroHierarchy(BoxlibHierarchy): @@ -1071,12 +1078,11 @@ class CastroDataset(BoxlibDataset): _index_class = CastroHierarchy _field_info_class = CastroFieldInfo _subtype_keyword = "castro" - _job_info_basename = "job_info" def __init__( self, output_dir, - cparam_filename=None, + cparam_filename="job_info", fparam_filename=None, dataset_type="boxlib_native", storage_filename=None, @@ -1096,7 +1102,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - return cls._is_valid_subtype(args[0]) + return cls._is_valid_subtype(*args, **kwargs) def _parse_parameter_file(self): super(CastroDataset, self)._parse_parameter_file() @@ -1152,12 +1158,11 @@ class MaestroDataset(BoxlibDataset): _field_info_class = MaestroFieldInfo _subtype_keyword = "maestro" - _job_info_basename = "job_info" def __init__( self, output_dir, - cparam_filename=None, + cparam_filename="job_info", fparam_filename=None, dataset_type="boxlib_native", storage_filename=None, @@ -1177,7 +1182,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - return cls._is_valid_subtype(args[0]) + return cls._is_valid_subtype(*args, **kwargs) def _parse_parameter_file(self): super(MaestroDataset, self)._parse_parameter_file() @@ -1243,12 +1248,11 @@ class NyxDataset(BoxlibDataset): _index_class = NyxHierarchy _field_info_class = NyxFieldInfo _subtype_keyword = "nyx" - _job_info_basename = "job_info" def __init__( self, output_dir, - cparam_filename=None, + cparam_filename="job_info", fparam_filename=None, dataset_type="boxlib_native", storage_filename=None, @@ -1268,7 +1272,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - return cls._is_valid_subtype(args[0]) + return cls._is_valid_subtype(*args, **kwargs) def _parse_parameter_file(self): super(NyxDataset, self)._parse_parameter_file() @@ -1523,12 +1527,11 @@ class WarpXDataset(BoxlibDataset): _index_class = WarpXHierarchy _field_info_class = WarpXFieldInfo _subtype_keyword = "warpx" - _job_info_basename = "warpx_job_info" def __init__( self, output_dir, - cparam_filename=None, + cparam_filename="warpx_job_info", fparam_filename=None, dataset_type="boxlib_native", storage_filename=None, @@ -1552,7 +1555,7 @@ def __init__( @classmethod def _is_valid(cls, *args, **kwargs): - return cls._is_valid_subtype(args[0]) + return cls._is_valid_subtype(*args, **kwargs) def _parse_parameter_file(self): super(WarpXDataset, self)._parse_parameter_file() @@ -1610,7 +1613,7 @@ class AMReXDataset(BoxlibDataset): def __init__( self, output_dir, - cparam_filename=None, + cparam_filename="job_info", fparam_filename=None, dataset_type="boxlib_native", storage_filename=None, From c1657f398161b8457b35472742eefa0dd933a362 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 17:33:58 +0200 Subject: [PATCH 209/653] fix parameter parsing for some maestro datasets --- yt/frontends/boxlib/data_structures.py | 35 ++++++++++++++------------ 1 file changed, 19 insertions(+), 16 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 82254223c32..829aad3190e 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1187,35 +1187,38 @@ def _is_valid(cls, *args, **kwargs): def _parse_parameter_file(self): 
super(MaestroDataset, self)._parse_parameter_file() jobinfo_filename = os.path.join(self.output_dir, "job_info") - line = "" + with open(jobinfo_filename, "r") as f: - while not line.startswith(" [*] indicates overridden default"): + for line in f: # get the code git hashes if "git hash" in line: # line format: codename git hash: the-hash fields = line.split(":") self.parameters[fields[0]] = fields[1].strip() - line = next(f) + + with open(jobinfo_filename, "r") as f: # get the runtime parameters for line in f: - p, v = (_.strip() for _ in line[4:].split("=", 1)) - if len(v) == 0: - self.parameters[p] = "" - else: - self.parameters[p] = _guess_pcast(v) + try: + p, v = (_.strip() for _ in line[4:].split("=", 1)) + if len(v) == 0: + self.parameters[p] = "" + else: + self.parameters[p] = _guess_pcast(v) + except ValueError: + # not a parameter line + pass + # hydro method is set by the base class -- override it here self.parameters["HydroMethod"] = "Maestro" # set the periodicity based on the integer BC runtime parameters periodicity = [True, True, True] - if not self.parameters["bcx_lo"] == -1: - periodicity[0] = False - - if not self.parameters["bcy_lo"] == -1: - periodicity[1] = False - - if not self.parameters["bcz_lo"] == -1: - periodicity[2] = False + for i, ax in enumerate("xyz"): + try: + periodicity[i] = self.parameters[f"bc{ax}_lo"] != -1 + except KeyError: + pass self.periodicity = ensure_tuple(periodicity) From 95fc603bbedf1c185d3a8cfbb5fde6062af5d670 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 18:00:36 +0200 Subject: [PATCH 210/653] fix bugs were cparams_filename was hardcoded downstream instead of using the instance attribute --- yt/frontends/boxlib/data_structures.py | 44 ++++++++++++++++---------- 1 file changed, 27 insertions(+), 17 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 829aad3190e..bdcc5575694 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -626,7 +626,7 @@ class BoxlibDataset(Dataset): def __init__( self, output_dir, - cparam_filename=None, + cparam_filename="job_info", fparam_filename=None, dataset_type="boxlib_native", storage_filename=None, @@ -642,7 +642,9 @@ def __init__( """ self.fluid_types += ("boxlib",) self.output_dir = os.path.abspath(os.path.expanduser(output_dir)) - self.cparam_filename = self._localize_check(cparam_filename) + self.cparam_filename = self._lookup_cparam_filepath( + output_dir, cparam_filename=cparam_filename + ) self.fparam_filename = self._localize_check(fparam_filename) self.storage_filename = storage_filename @@ -681,23 +683,32 @@ def _is_valid(cls, *args, **kwargs): return os.path.exists(header_filename) @classmethod - def _is_valid_subtype(cls, *args, **kwargs): - # this is used by derived classes + def _lookup_cparam_filepath(cls, *args, **kwargs): output_dir = args[0] - - if not BoxlibDataset._is_valid(output_dir): - return False - iargs = inspect.getcallargs(cls.__init__, args, kwargs) lookup_table = [ os.path.abspath(os.path.join(p, iargs["cparam_filename"])) for p in (output_dir, os.path.dirname(output_dir)) ] found = [os.path.exists(file) for file in lookup_table] + if not any(found): + return None + + return lookup_table[found.index(True)] + + @classmethod + def _is_valid_subtype(cls, *args, **kwargs): + # this is used by derived classes + output_dir = args[0] + + if not BoxlibDataset._is_valid(output_dir): + return False + + cparam_filepath = 
cls._lookup_cparam_filepath(*args, **kwargs) + if cparam_filepath is None: return False - cparam_filepath = lookup_table[found.index(True)] lines = [line.lower() for line in open(cparam_filepath).readlines()] return any(cls._subtype_keyword in line for line in lines) @@ -718,11 +729,10 @@ def _parse_cparams(self): if self.cparam_filename is None: return for line in (line.split("#")[0].strip() for line in open(self.cparam_filename)): - if "=" not in line: - continue - if len(line) == 0: + try: + param, vals = [s.strip() for s in line.split("=")] + except ValueError: continue - param, vals = [s.strip() for s in line.split("=")] if param == "amr.n_cell": vals = self.domain_dimensions = np.array(vals.split(), dtype="int32") @@ -1106,7 +1116,7 @@ def _is_valid(cls, *args, **kwargs): def _parse_parameter_file(self): super(CastroDataset, self)._parse_parameter_file() - jobinfo_filename = os.path.join(self.output_dir, "job_info") + jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename) line = "" with open(jobinfo_filename, "r") as f: while not line.startswith(" Inputs File Parameters"): @@ -1186,7 +1196,7 @@ def _is_valid(cls, *args, **kwargs): def _parse_parameter_file(self): super(MaestroDataset, self)._parse_parameter_file() - jobinfo_filename = os.path.join(self.output_dir, "job_info") + jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename) with open(jobinfo_filename, "r") as f: for line in f: @@ -1283,7 +1293,7 @@ def _parse_parameter_file(self): # Nyx is always cosmological. self.cosmological_simulation = 1 - jobinfo_filename = os.path.join(self.output_dir, "job_info") + jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename) has_cosmo_info = False with open(jobinfo_filename, "r") as f: @@ -1562,7 +1572,7 @@ def _is_valid(cls, *args, **kwargs): def _parse_parameter_file(self): super(WarpXDataset, self)._parse_parameter_file() - jobinfo_filename = os.path.join(self.output_dir, "warpx_job_info") + jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename) with open(jobinfo_filename, "r") as f: for line in f.readlines(): if _skip_line(line): From 1b56edca6c2ce178b0484ea2cbb6dbb34fac9085 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 18:12:14 +0200 Subject: [PATCH 211/653] fix a wrong file specification in test --- yt/frontends/boxlib/tests/test_outputs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/boxlib/tests/test_outputs.py b/yt/frontends/boxlib/tests/test_outputs.py index 6a3e0e9418c..90bdb943034 100644 --- a/yt/frontends/boxlib/tests/test_outputs.py +++ b/yt/frontends/boxlib/tests/test_outputs.py @@ -274,9 +274,9 @@ def test_NyxDataset(): assert isinstance(data_dir_load(LyA), NyxDataset) -@requires_file("nyx_small") +@requires_file("nyx_small/nyx_small_00000") def test_NyxDataset_2(): - assert isinstance(data_dir_load("nyx_small"), NyxDataset) + assert isinstance(data_dir_load("nyx_small/nyx_small_00000"), NyxDataset) @requires_file(RT_particles) From 8b1681d078d3e930c6ac0a56c794e7767ef0145a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 18:52:17 +0200 Subject: [PATCH 212/653] choose the easy route to fix the non-cosmological case with nyx --- yt/frontends/boxlib/data_structures.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index bdcc5575694..c7889100d76 100644 --- 
a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -1290,12 +1290,8 @@ def _is_valid(cls, *args, **kwargs): def _parse_parameter_file(self): super(NyxDataset, self)._parse_parameter_file() - # Nyx is always cosmological. - self.cosmological_simulation = 1 - jobinfo_filename = os.path.join(self.output_dir, self.cparam_filename) - has_cosmo_info = False with open(jobinfo_filename, "r") as f: for line in f: # get the code git hashes @@ -1305,10 +1301,14 @@ def _parse_parameter_file(self): self.parameters[fields[0]] = fields[1].strip() if line.startswith(" Cosmology Information"): - has_cosmo_info = True + self.cosmological_simulation = 1 break + else: + self.cosmological_simulation = 0 - if has_cosmo_info: + if self.cosmological_simulation: + # note that modern Nyx is always cosmological, but there are some old + # files without these parameters so we want to special-case them for line in f: if "Omega_m (comoving)" in line: self.omega_matter = float(line.split(":")[1]) @@ -1316,8 +1316,6 @@ def _parse_parameter_file(self): self.omega_lambda = float(line.split(":")[1]) elif "h (comoving)" in line: self.hubble_constant = float(line.split(":")[1]) - else: - raise NotImplementedError # Read in the `comoving_a` file and parse the value. We should fix this # in the new Nyx output format... From 962daf30018857234606caa640c0af225b21ab64 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 19:59:48 +0200 Subject: [PATCH 213/653] bugfix: add default real number type for for old nyx files --- yt/frontends/boxlib/data_structures.py | 25 +++++++++++++++---------- 1 file changed, 15 insertions(+), 10 deletions(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index c7889100d76..d1afcb0c08d 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -2,6 +2,7 @@ import inspect import os import re +import warnings from collections import namedtuple from stat import ST_CTIME @@ -125,18 +126,22 @@ class BoxLibParticleHeader: def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None): self.particle_type = directory_name - header_filename = ds.output_dir + "/" + directory_name + "/Header" + header_filename = os.path.join(ds.output_dir, directory_name, "Header") with open(header_filename, "r") as f: self.version_string = f.readline().strip() particle_real_type = self.version_string.split("_")[-1] - particle_real_type = self.version_string.split("_")[-1] - if particle_real_type == "double": - self.real_type = np.float64 - elif particle_real_type == "single": - self.real_type = np.float32 - else: - raise RuntimeError("yt did not recognize particle real type.") + known_real_types = {"double": np.float64, "single": np.float32} + try: + self.real_type = known_real_types[particle_real_type] + except KeyError: + warnings.warn( + f"yt did not recognize particle real type {particle_real_type}" + "assuming double", + category=RuntimeWarning, + ) + self.real_type = known_real_types["double"] + self.int_type = np.int32 self.dim = int(f.readline().strip()) @@ -231,14 +236,14 @@ class AMReXParticleHeader: def __init__(self, ds, directory_name, is_checkpoint, extra_field_names=None): self.particle_type = directory_name - header_filename = ds.output_dir + "/" + directory_name + "/Header" + header_filename = os.path.join(ds.output_dir, directory_name, "Header") self.real_component_names = [] self.int_component_names = [] with open(header_filename, "r") as 
f: self.version_string = f.readline().strip() particle_real_type = self.version_string.split("_")[-1] - particle_real_type = self.version_string.split("_")[-1] + if particle_real_type == "double": self.real_type = np.float64 elif particle_real_type == "single": From 4b9110b935d9d54ac4b0e07b0f4c1ecabb4e041d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 4 Aug 2020 20:09:04 +0200 Subject: [PATCH 214/653] bugfix: fix cparams parser in BoxlibDataset --- yt/frontends/boxlib/data_structures.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index d1afcb0c08d..fc830a305b4 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -757,7 +757,11 @@ def _parse_cparams(self): elif param == "castro.use_comoving": vals = self.cosmological_simulation = int(vals) else: - vals = _guess_pcast(vals) + try: + vals = _guess_pcast(vals) + except IndexError: + # hitting an empty string + vals = None self.parameters[param] = vals if getattr(self, "cosmological_simulation", 0) == 1: From 03467eb301dda8c6767ec4d5a5eade7e6b2cab58 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 12:18:52 +0200 Subject: [PATCH 215/653] Isort pass --- yt/frontends/ramses/particle_handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 954d4c227c7..a141c5bb717 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -4,8 +4,8 @@ from yt.funcs import mylog from yt.utilities.cython_fortran_utils import FortranFile -from .io import _read_part_file_descriptor from .field_handlers import HandlerMixin +from .io import _read_part_file_descriptor PARTICLE_HANDLERS = set() From 058aa8d1a698383a8ce88d0444cffc22df862955 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 13:33:22 +0200 Subject: [PATCH 216/653] add note --- yt/frontends/boxlib/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index fc830a305b4..3bb358a8fa2 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -631,7 +631,7 @@ class BoxlibDataset(Dataset): def __init__( self, output_dir, - cparam_filename="job_info", + cparam_filename="job_info", # todo: harmonise this default value with docstring fparam_filename=None, dataset_type="boxlib_native", storage_filename=None, From 28ce6d312d998c656083cf1e90a25a8ceafffeac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 14:31:27 +0200 Subject: [PATCH 217/653] bump boxlib answer tests --- tests/tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index 893ee953a73..61e1f04a8bd 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -131,7 +131,7 @@ answer_tests: - yt/frontends/boxlib/tests/test_outputs.py:test_units_override - yt/frontends/boxlib/tests/test_outputs.py:test_raw_fields - local_boxlib_particles_006: + local_boxlib_particles_007: - yt/frontends/boxlib/tests/test_outputs.py:test_LyA - yt/frontends/boxlib/tests/test_outputs.py:test_nyx_particle_io - yt/frontends/boxlib/tests/test_outputs.py:test_castro_particle_io From dc37cbf2cc8f4335b3b01c8d8b9657a0005fcd36 Mon Sep 17 00:00:00 2001 From: 
=?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 14:50:26 +0200 Subject: [PATCH 218/653] add a test to check params are read correctly for the Beam dataset --- yt/frontends/boxlib/tests/test_outputs.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/yt/frontends/boxlib/tests/test_outputs.py b/yt/frontends/boxlib/tests/test_outputs.py index 90bdb943034..6f1e4a4d974 100644 --- a/yt/frontends/boxlib/tests/test_outputs.py +++ b/yt/frontends/boxlib/tests/test_outputs.py @@ -196,6 +196,11 @@ def test_plasma(): def test_beam(): ds = data_dir_load(beam) assert_equal(str(ds), "plt03008") + for param in ("number of boxes", "maximum zones"): + # PR 2807 + # these parameters are only populated if the config file attached to this + # dataset is read correctly + assert param in ds.parameters for test in small_patch_amr(ds, _warpx_fields, input_center="c", input_weight="Ex"): test_beam.__name__ = test.description yield test From aba36cfb5e01a035ca4ac6578e60079d49c84cd4 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 6 Apr 2020 11:03:13 +0100 Subject: [PATCH 219/653] Add index_order to HaloCatalogDataset. --- yt/frontends/halo_catalog/data_structures.py | 19 +++++++------------ 1 file changed, 7 insertions(+), 12 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 5919b95ee77..0ae92daeaa3 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -2,7 +2,7 @@ import numpy as np -from yt.data_objects.static_output import ParticleFile +from yt.data_objects.static_output import ParticleFile, validate_index_order from yt.frontends.ytdata.data_structures import SavedDataset from yt.funcs import parse_h5_attr from yt.geometry.particle_geometry_handler import ParticleIndex @@ -10,7 +10,6 @@ from .fields import HaloCatalogFieldInfo - class HaloCatalogParticleIndex(ParticleIndex): def _setup_filenames(self): template = self.dataset.filename_template @@ -98,24 +97,20 @@ class HaloCatalogDataset(SavedDataset): _file_class = HaloCatalogHDF5File _field_info_class = HaloCatalogFieldInfo _suffix = ".h5" - _con_attrs = ( - "cosmological_simulation", - "current_time", - "current_redshift", - "hubble_constant", - "omega_matter", - "omega_lambda", - "domain_left_edge", - "domain_right_edge", - ) + _con_attrs = ("cosmological_simulation", + "current_time", "current_redshift", + "hubble_constant", "omega_matter", "omega_lambda", + "domain_left_edge", "domain_right_edge") def __init__( self, filename, dataset_type="halocatalog_hdf5", + index_order=None, units_override=None, unit_system="cgs", ): + self.index_order = validate_index_order(index_order) super(HaloCatalogDataset, self).__init__( filename, dataset_type, From 3eeaeb2d0138a50fd46bf9c2412e2bd797b64790 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 6 Apr 2020 11:06:28 +0100 Subject: [PATCH 220/653] Do not identify groups as fields. 
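The diff below filters HDF5 groups (such as "particles") out of the list of on-disk halo fields. A small illustration of the isinstance(f[name], h5py.Group) test, using a made-up in-memory file rather than a real halo catalog:

    import numpy as np
    import h5py

    # Invented halo-catalog layout: datasets are scalar halo fields, while
    # groups (here "particles") hold member-particle arrays and must be skipped.
    with h5py.File("halos_demo.h5", "w", driver="core", backing_store=False) as f:
        f.create_dataset("particle_mass", data=np.ones(4))
        f.create_group("particles").create_dataset("ids", data=np.arange(12))
        fields = [("halos", name) for name in f
                  if not isinstance(f[name], h5py.Group)]

    print(fields)  # [('halos', 'particle_mass')] -- the group is not reported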
--- yt/frontends/halo_catalog/io.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 108d054e284..b5588c6f1a8 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -104,9 +104,10 @@ def _count_particles(self, data_file): return {"halos": nhalos} def _identify_fields(self, data_file): - with h5py.File(data_file.filename, mode="r") as f: - fields = [("halos", field) for field in f] - units = dict( - [(("halos", field), parse_h5_attr(f[field], "units")) for field in f] - ) + with h5py.File(data_file.filename, "r") as f: + fields = [("halos", field) for field in f + if not isinstance(f[field], h5py.Group)] + units = dict([(("halos", field), + parse_h5_attr(f[field], "units")) + for field in f]) return fields, units From 2171b97ba6e16f43d6c4fe7b8a5f4557d8ddb1d6 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 6 Apr 2020 13:35:48 +0100 Subject: [PATCH 221/653] Make cosmology calculator setup its own method. --- yt/data_objects/static_output.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 0ad62cd35c4..0bfea3e171e 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -255,6 +255,7 @@ def __init__( self._parse_parameter_file() self.set_units() + self.setup_cosmology() self._assign_unit_system(unit_system) self._setup_coordinate_handler() @@ -1129,6 +1130,7 @@ def set_units(self): self.set_code_units() + def setup_cosmology(self): if getattr(self, "cosmological_simulation", False): # this dataset is cosmological, add a cosmology object From e708d87b072321eae201219522e8647c2d58a235 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 6 Apr 2020 15:45:13 +0100 Subject: [PATCH 222/653] Do not rerun _setup_filenames if already called. --- yt/geometry/particle_geometry_handler.py | 3 +++ 1 file changed, 3 insertions(+) diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 449d7bc7cd1..626d59ff705 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -46,6 +46,9 @@ def convert(self, unit): return self.dataset.conversion_factors[unit] def _setup_filenames(self): + if hasattr(self, "data_files"): + return + template = self.dataset.filename_template ndoms = self.dataset.file_count cls = self.dataset._file_class From 11454cd7510dc0a923f3f4e430cca41d8508396d Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 6 Apr 2020 15:47:56 +0100 Subject: [PATCH 223/653] Spelling mistake. 
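The _setup_filenames guard added above is plain attribute-based memoization: once self.data_files exists, later calls return immediately. A tiny standalone sketch (class and file names invented):

    class Index:
        def setup_filenames(self):
            # Bail out if a previous call (or another code path) already built
            # the list, so repeated calls never rebuild or duplicate it.
            if hasattr(self, "data_files"):
                return
            self.data_files = ["data.%d.h5" % i for i in range(3)]

    idx = Index()
    idx.setup_filenames()
    idx.setup_filenames()  # second call returns immediately
    print(idx.data_files)  # ['data.0.h5', 'data.1.h5', 'data.2.h5']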
--- yt/frontends/gadget_fof/api.py | 2 +- yt/frontends/gadget_fof/data_structures.py | 7 +++---- 2 files changed, 4 insertions(+), 5 deletions(-) diff --git a/yt/frontends/gadget_fof/api.py b/yt/frontends/gadget_fof/api.py index 02e041fa7a4..7a59e55149c 100644 --- a/yt/frontends/gadget_fof/api.py +++ b/yt/frontends/gadget_fof/api.py @@ -5,7 +5,7 @@ GadgetFOFHaloParticleIndex, GadgetFOFHDF5File, GadgetFOFParticleIndex, - GagdetFOFHaloContainer, + GadgetFOFHaloContainer, ) from .fields import GadgetFOFFieldInfo, GadgetFOFHaloFieldInfo from .io import IOHandlerGadgetFOFHaloHDF5, IOHandlerGadgetFOFHDF5 diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index dc6efe24325..81019371995 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -205,7 +205,7 @@ def _halos_ds(self): def _setup_classes(self): super(GadgetFOFDataset, self)._setup_classes() - self.halo = partial(GagdetFOFHaloContainer, ds=self._halos_ds) + self.halo = partial(GadgetFOFHaloContainer, ds=self._halos_ds) def _parse_parameter_file(self): with h5py.File(self.parameter_filename, mode="r") as f: @@ -502,8 +502,7 @@ def __repr__(self): def _setup_classes(self): self.objects = [] - -class GagdetFOFHaloContainer(YTSelectionContainer): +class GadgetFOFHaloContainer(YTSelectionContainer): """ Create a data container to get member particles and individual values from halos and subhalos. Halo mass, position, and @@ -590,7 +589,7 @@ def __init__(self, ptype, particle_identifier, ds=None): self.ptype = ptype self._current_particle_type = ptype - super(GagdetFOFHaloContainer, self).__init__(ds, {}) + super(GadgetFOFHaloContainer, self).__init__(ds, {}) if ptype == "Subhalo" and isinstance(particle_identifier, tuple): self.group_identifier, self.subgroup_identifier = particle_identifier From e05069c2a978782115ab56c5df33955ff52bf0e0 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 6 Apr 2020 15:48:20 +0100 Subject: [PATCH 224/653] Implement up to halos_field_list. 
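The halo index introduced below records, for every catalog file, the id of its first halo, and maps a halo id to its file with np.digitize. A minimal sketch of that lookup, with invented halo counts for three files:

    import numpy as np

    # Invented halo counts for three catalog files.
    halos_per_file = np.array([100, 250, 80])
    index_start = np.concatenate(([0], halos_per_file.cumsum()[:-1]))  # [0, 100, 350]

    def file_index_for(halo_ids):
        # For each id, digitize returns the file whose starting offset is the
        # last one not exceeding that id.
        return np.digitize(halo_ids, index_start, right=False) - 1

    ids = np.array([0, 99, 100, 349, 350, 429])
    print(file_index_for(ids))                     # [0 0 1 1 2 2]
    print(ids - index_start[file_index_for(ids)])  # row of each halo within its file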
--- yt/frontends/halo_catalog/data_structures.py | 487 +++++++++++++++++-- yt/frontends/halo_catalog/fields.py | 31 +- yt/frontends/halo_catalog/io.py | 144 ++++++ 3 files changed, 612 insertions(+), 50 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 0ae92daeaa3..d4245e96799 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -1,39 +1,28 @@ +from collections import defaultdict +from functools import partial import glob - -import numpy as np - -from yt.data_objects.static_output import ParticleFile, validate_index_order -from yt.frontends.ytdata.data_structures import SavedDataset -from yt.funcs import parse_h5_attr -from yt.geometry.particle_geometry_handler import ParticleIndex from yt.utilities.on_demand_imports import _h5py as h5py +import numpy as np +import os +import weakref -from .fields import HaloCatalogFieldInfo - -class HaloCatalogParticleIndex(ParticleIndex): - def _setup_filenames(self): - template = self.dataset.filename_template - ndoms = self.dataset.file_count - cls = self.dataset._file_class - if ndoms > 1: - self.data_files = [ - cls(self.dataset, self.io, template % {"num": i}, i, range=None) - for i in range(ndoms) - ] - else: - self.data_files = [ - cls( - self.dataset, - self.io, - self.dataset.parameter_filename, - 0, - range=None, - ) - ] - self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files - ) +from .fields import \ + HaloCatalogFieldInfo, \ + HaloCatalogHaloFieldInfo +from yt.data_objects.data_containers import \ + YTSelectionContainer +from yt.data_objects.static_output import \ + ParticleDataset +from yt.frontends.ytdata.data_structures import \ + SavedDataset +from yt.funcs import \ + parse_h5_attr +from yt.geometry.particle_geometry_handler import \ + ParticleIndex +from yt.data_objects.static_output import \ + ParticleFile, \ + validate_index_order class HaloCatalogFile(ParticleFile): def __init__(self, ds, io, filename, file_id, range): @@ -64,11 +53,14 @@ def _get_particle_positions(self, ptype, f=None): class HaloCatalogHDF5File(HaloCatalogFile): def __init__(self, ds, io, filename, file_id, range): - with h5py.File(filename, mode="r") as f: - self.header = dict( - (field, parse_h5_attr(f, field)) for field in f.attrs.keys() - ) - super(HaloCatalogHDF5File, self).__init__(ds, io, filename, file_id, range) + with h5py.File(filename, "r") as f: + self.header = dict((field, parse_h5_attr(f, field)) \ + for field in f.attrs.keys()) + pids = f.get('particles/ids') + self.total_ids = 0 if pids is None else pids.size + self.group_length_sum = self.total_ids + super(HaloCatalogHDF5File, self).__init__( + ds, io, filename, file_id, range) def _read_particle_positions(self, ptype, f=None): """ @@ -118,6 +110,29 @@ def __init__( unit_system=unit_system, ) + def add_field(self, *args, **kwargs): + super(HaloCatalogDataset, self).add_field(*args, **kwargs) + self._halos_ds.add_field(*args, **kwargs) + + @property + def halos_field_list(self): + return self._halos_ds.field_list + + @property + def halos_derived_field_list(self): + return self._halos_ds.derived_field_list + + _instantiated_halo_ds = None + @property + def _halos_ds(self): + if self._instantiated_halo_ds is None: + self._instantiated_halo_ds = HaloCatalogHaloDataset(self) + return self._instantiated_halo_ds + + def _setup_classes(self): + super(HaloCatalogDataset, self)._setup_classes() + self.halo = partial(HaloCatalogHaloContainer, 
ds=self._halos_ds) + def _parse_parameter_file(self): self.refine_by = 2 self.dimensionality = 3 @@ -126,8 +141,8 @@ def _parse_parameter_file(self): prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2]) self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix) self.file_count = len(glob.glob(prefix + "*" + self._suffix)) - self.particle_types = "halos" - self.particle_types_raw = "halos" + self.particle_types = ("halos",) + self.particle_types_raw = ("halos",) super(HaloCatalogDataset, self)._parse_parameter_file() @classmethod @@ -141,3 +156,397 @@ def _is_valid(self, *args, **kwargs): ): return True return False + +class HaloCatalogHaloParticleIndex(ParticleIndex): + def __init__(self, ds, dataset_type): + self.real_ds = weakref.proxy(ds.real_ds) + super(HaloCatalogHaloParticleIndex, self).__init__(ds, dataset_type) + + def _setup_data_io(self): + super(HaloCatalogHaloParticleIndex, self)._setup_data_io() + self._setup_filenames() + + def _setup_geometry(self): + self._setup_data_io() + + if self.real_ds._instantiated_index is None: + template = self.real_ds.filename_template + ndoms = self.real_ds.file_count + cls = self.real_ds._file_class + self.data_files = \ + [cls(self.dataset, self.io, template % {'num':i}, i, None) + for i in range(ndoms)] + else: + self.data_files = self.real_ds.index.data_files + + self._calculate_particle_index_starts() + self._calculate_particle_count() + self._create_halo_id_table() + + def _calculate_particle_count(self): + """ + Calculate the total number of each type of particle. + """ + self.particle_count = \ + dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) + for ptype in self.ds.particle_types_raw]) + + def _calculate_particle_index_starts(self): + """ + Create a dict of halo id offsets for each file. + """ + particle_count = defaultdict(int) + for data_file in self.data_files: + data_file.index_start = dict([(ptype, particle_count[ptype]) for + ptype in data_file.total_particles]) + for ptype in data_file.total_particles: + particle_count[ptype] += data_file.total_particles[ptype] + + self._halo_index_start = \ + dict([(ptype, np.array([data_file.index_start[ptype] + for data_file in self.data_files])) + for ptype in self.ds.particle_types_raw]) + + def _create_halo_id_table(self): + """ + Create a list of halo start ids so we know which file + contains particles for a given halo. Note, the halo ids + are distributed over all files and so the ids for a given + halo are likely stored in a different file than the halo + itself. 
+ """ + + self._halo_id_number = np.array([data_file.total_ids + for data_file in self.data_files]) + self._halo_id_end = self._halo_id_number.cumsum() + self._halo_id_start = self._halo_id_end - self._halo_id_number + + self._group_length_sum = \ + np.array([data_file.group_length_sum + for data_file in self.data_files]) + + def _detect_output_fields(self): + field_list = [] + scalar_field_list = [] + units = {} + found_fields = \ + dict([(ptype, False) + for ptype, pnum in self.particle_count.items() + if pnum > 0]) + has_ids = False + + for data_file in self.data_files: + fl, sl, idl, _units = self.io._identify_fields(data_file) + units.update(_units) + field_list.extend([f for f in fl + if f not in field_list]) + scalar_field_list.extend([f for f in sl + if f not in scalar_field_list]) + for ptype in found_fields: + found_fields[ptype] |= data_file.total_particles[ptype] + has_ids |= len(idl) > 0 + if all(found_fields.values()) and has_ids: break + + self.field_list = field_list + self.scalar_field_list = scalar_field_list + ds = self.dataset + ds.scalar_field_list = scalar_field_list + ds.particle_types = tuple(set(pt for pt, ds in field_list)) + ds.field_units.update(units) + ds.particle_types_raw = ds.particle_types + + def _identify_base_chunk(self, dobj): + pass + + def _read_particle_fields(self, fields, dobj, chunk = None): + if len(fields) == 0: return {}, [] + fields_to_read, fields_to_generate = self._split_fields(fields) + if len(fields_to_read) == 0: + return {}, fields_to_generate + fields_to_return = self.io._read_particle_selection( + dobj, fields_to_read) + return fields_to_return, fields_to_generate + + def _get_halo_file_indices(self, ptype, identifiers): + return np.digitize(identifiers, + self._halo_index_start[ptype], right=False) - 1 + + def _get_halo_scalar_index(self, ptype, identifier): + i_scalar = self._get_halo_file_indices(ptype, [identifier])[0] + scalar_index = identifier - self._halo_index_start[ptype][i_scalar] + return scalar_index + + def _get_halo_values(self, ptype, identifiers, fields, + f=None): + """ + Get field values for halos. IDs are likely to be + sequential (or at least monotonic), but not necessarily + all within the same file. + + This does not do much to minimize file i/o, but with + halos randomly distributed across files, there's not + much more we can do. 
+ """ + + # if a file is already open, don't open it again + filename = None if f is None \ + else f.filename + + data = defaultdict(lambda: np.empty(identifiers.size)) + i_scalars = self._get_halo_file_indices(ptype, identifiers) + for i_scalar in np.unique(i_scalars): + target = i_scalars == i_scalar + scalar_indices = identifiers - \ + self._halo_index_start[ptype][i_scalar] + + # only open file if it's not already open + my_f = f if self.data_files[i_scalar].filename == filename \ + else h5py.File(self.data_files[i_scalar].filename, "r") + + for field in fields: + data[field][target] = \ + my_f[os.path.join(ptype, field)][()][scalar_indices[target]] + + if self.data_files[i_scalar].filename != filename: my_f.close() + + return data + +class HaloCatalogHaloDataset(ParticleDataset): + _index_class = HaloCatalogHaloParticleIndex + _file_class = HaloCatalogHDF5File + _field_info_class = HaloCatalogHaloFieldInfo + + def __init__(self, ds, dataset_type="halo_catalog_halo_hdf5"): + self.real_ds = ds + for attr in ['filename_template', 'file_count', + 'particle_types_raw', 'particle_types', + 'periodicity']: + setattr(self, attr, getattr(self.real_ds, attr)) + + super(HaloCatalogHaloDataset, self).__init__( + self.real_ds.parameter_filename, dataset_type) + + def print_key_parameters(self): + pass + + def _set_derived_attrs(self): + pass + + def _parse_parameter_file(self): + for attr in ["cosmological_simulation", "cosmology", + "current_redshift", "current_time", + "dimensionality", "domain_dimensions", + "domain_left_edge", "domain_right_edge", + "domain_width", "hubble_constant", + "omega_lambda", "omega_matter", + "unique_identifier"]: + setattr(self, attr, getattr(self.real_ds, attr)) + + def set_code_units(self): + for unit in ["length", "time", "mass", + "velocity", "magnetic", "temperature"]: + my_unit = "%s_unit" % unit + setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) + self.unit_registry = self.real_ds.unit_registry + + def __repr__(self): + return "%s" % self.real_ds + + def _setup_classes(self): + self.objects = [] + +class HaloCatalogHaloContainer(YTSelectionContainer): + """ + Create a data container to get member particles and individual + values from halos and subhalos. Halo mass, position, and + velocity are set as attributes. Halo IDs are accessible + through the field, "member_ids". Other fields that are one + value per halo are accessible as normal. The field list for + halo objects can be seen in `ds.halos_field_list`. + + Parameters + ---------- + ptype : string + The type of halo, either "Group" for the main halo or + "Subhalo" for subhalos. + particle_identifier : int or tuple of ints + The halo or subhalo id. If requesting a subhalo, the id + can also be given as a tuple of the main halo id and + subgroup id, such as (1, 4) for subgroup 4 of halo 1. + + Attributes + ---------- + particle_identifier : int + The id of the halo or subhalo. + group_identifier : int + For subhalos, the id of the enclosing halo. + subgroup_identifier : int + For subhalos, the relative id of the subhalo within + the enclosing halo. + particle_number : int + Number of particles in the halo. + mass : float + Halo mass. + position : array of floats + Halo position. + velocity : array of floats + Halo velocity. 
+ + Note + ---- + Relevant Fields: + + * particle_number - number of particles + * subhalo_number - number of subhalos + * group_identifier - id of parent group for subhalos + + Examples + -------- + + >>> import yt + >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5") + >>> + >>> halo = ds.halo("Group", 0) + >>> print(halo.mass) + 13256.5517578 code_mass + >>> print(halo.position) + [ 16.18603706 6.95965052 12.52694607] code_length + >>> print(halo.velocity) + [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s + >>> print(halo["Group_R_Crit200"]) + [ 0.79668683] code_length + >>> + >>> # particle ids for this halo + >>> print(halo["member_ids"]) + [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless + >>> + >>> # get the first subhalo of this halo + >>> subhalo = ds.halo("Subhalo", (0, 0)) + >>> print(subhalo["member_ids"]) + [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless + + """ + + _type_name = "halo" + _con_args = ("ptype", "particle_identifier") + _spatial = False + + def __init__(self, ptype, particle_identifier, ds=None): + if ptype not in ds.particle_types_raw: + raise RuntimeError("Possible halo types are %s, supplied \"%s\"." % + (ds.particle_types_raw, ptype)) + + self.ptype = ptype + self._current_particle_type = ptype + super(HaloCatalogHaloContainer, self).__init__(ds, {}) + + if ptype == "Subhalo" and isinstance(particle_identifier, tuple): + self.group_identifier, self.subgroup_identifier = \ + particle_identifier + my_data = self.index._get_halo_values( + "Group", np.array([self.group_identifier]), + ["GroupFirstSub"]) + self.particle_identifier = \ + np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier) + else: + self.particle_identifier = particle_identifier + + if self.particle_identifier >= self.index.particle_count[ptype]: + raise RuntimeError("%s %d requested, but only %d %s objects exist." % + (ptype, particle_identifier, + self.index.particle_count[ptype], ptype)) + + # Find the file that has the scalar values for this halo. + i_scalar = self.index._get_halo_file_indices( + ptype, [self.particle_identifier])[0] + self.scalar_data_file = self.index.data_files[i_scalar] + + # index within halo arrays that corresponds to this halo + self.scalar_index = self.index._get_halo_scalar_index( + ptype, self.particle_identifier) + + halo_fields = ["%sLen" % ptype] + if ptype == "Subhalo": halo_fields.append("SubhaloGrNr") + my_data = self.index._get_halo_values( + ptype, np.array([self.particle_identifier]), + halo_fields) + self.particle_number = np.int64(my_data["%sLen" % ptype][0]) + + if ptype == "Group": + self.group_identifier = self.particle_identifier + id_offset = 0 + # index of file that has scalar values for the group + g_scalar = i_scalar + group_index = self.scalar_index + + # If a subhalo, find the index of the parent. + elif ptype == "Subhalo": + self.group_identifier = np.int64(my_data["SubhaloGrNr"][0]) + + # Find the file that has the scalar values for the parent group. 
+ g_scalar = self.index._get_halo_file_indices( + "Group", [self.group_identifier])[0] + + # index within halo arrays that corresponds to the paent group + group_index = self.index._get_halo_scalar_index( + "Group", self.group_identifier) + + my_data = self.index._get_halo_values( + "Group", np.array([self.group_identifier]), + ["GroupNsubs", "GroupFirstSub"]) + self.subgroup_identifier = self.particle_identifier - \ + np.int64(my_data["GroupFirstSub"][0]) + parent_subhalos = my_data["GroupNsubs"][0] + + mylog.debug("Subhalo %d is subgroup %s of %d in group %d." % \ + (self.particle_identifier, self.subgroup_identifier, + parent_subhalos, self.group_identifier)) + + # ids of the sibling subhalos that come before this one + if self.subgroup_identifier > 0: + sub_ids = np.arange( + self.particle_identifier - self.subgroup_identifier, + self.particle_identifier) + my_data = self.index._get_halo_values( + "Subhalo", sub_ids, ["SubhaloLen"]) + id_offset = my_data["SubhaloLen"].sum(dtype=np.int64) + else: + id_offset = 0 + + # Calculate the starting index for the member particles. + # First, add up all the particles in the earlier files. + all_id_start = self.index._group_length_sum[:g_scalar].sum(dtype=np.int64) + + # Now add the halos in this file that come before. + with h5py.File(self.index.data_files[g_scalar].filename, "r") as f: + all_id_start += f["Group"]["GroupLen"][:group_index].sum(dtype=np.int64) + + # Add the subhalo offset. + all_id_start += id_offset + + # indices of first and last files containing member particles + i_start = np.digitize([all_id_start], + self.index._halo_id_start, + right=False)[0] - 1 + i_end = np.digitize([all_id_start+self.particle_number], + self.index._halo_id_end, + right=True)[0] + self.field_data_files = self.index.data_files[i_start:i_end+1] + + # starting and ending indices for each file containing particles + self.field_data_start = \ + (all_id_start - + self.index._halo_id_start[i_start:i_end+1]).clip(min=0) + self.field_data_start = self.field_data_start.astype(np.int64) + self.field_data_end = \ + (all_id_start + self.particle_number - + self.index._halo_id_start[i_start:i_end+1]).clip( + max=self.index._halo_id_number[i_start:i_end+1]) + self.field_data_end = self.field_data_end.astype(np.int64) + + for attr in ["mass", "position", "velocity"]: + setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) + + def __repr__(self): + return "%s_%s_%09d" % \ + (self.ds, self.ptype, self.particle_identifier) diff --git a/yt/frontends/halo_catalog/fields.py b/yt/frontends/halo_catalog/fields.py index 7740ba150ea..b498ae74b61 100644 --- a/yt/frontends/halo_catalog/fields.py +++ b/yt/frontends/halo_catalog/fields.py @@ -5,18 +5,27 @@ v_units = "cm / s" r_units = "cm" +_particle_fields = ( + ("particle_identifier", ("", [], None)), + ("particle_position_x", (p_units, [], None)), + ("particle_position_y", (p_units, [], None)), + ("particle_position_z", (p_units, [], None)), + ("particle_velocity_x", (v_units, [], None)), + ("particle_velocity_y", (v_units, [], None)), + ("particle_velocity_z", (v_units, [], None)), + ("particle_mass", (m_units, [], "Virial Mass")), + ("virial_radius", (r_units, [], "Virial Radius")), +) class HaloCatalogFieldInfo(FieldInfoContainer): known_other_fields = () - known_particle_fields = ( - ("particle_identifier", ("", [], None)), - ("particle_position_x", (p_units, [], None)), - ("particle_position_y", (p_units, [], None)), - ("particle_position_z", (p_units, [], None)), - ("particle_velocity_x", (v_units, [], None)), - 
("particle_velocity_y", (v_units, [], None)), - ("particle_velocity_z", (v_units, [], None)), - ("particle_mass", (m_units, [], "Virial Mass")), - ("virial_radius", (r_units, [], "Virial Radius")), - ) + known_particle_fields = _particle_fields + +class HaloCatalogHaloFieldInfo(FieldInfoContainer): + known_other_fields = () + + known_particle_fields = _particle_fields + \ + ( + ("ids", ("", ["member_ids"], None)), + ) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index b5588c6f1a8..8731795293c 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -111,3 +111,147 @@ def _identify_fields(self, data_file): parse_h5_attr(f[field], "units")) for field in f]) return fields, units + +class IOHandlerHaloCatalogHaloHDF5(IOHandlerHaloCatalogHDF5): + _dataset_type = "halo_catalog_halo_hdf5" + + def _read_particle_coords(self, chunks, ptf): + pass + + def _read_particle_selection(self, dobj, fields): + rv = {} + ind = {} + # We first need a set of masks for each particle type + ptf = defaultdict(list) # ON-DISK TO READ + fsize = defaultdict(lambda: 0) # COUNT RV + field_maps = defaultdict(list) # ptypes -> fields + unions = self.ds.particle_unions + # What we need is a mapping from particle types to return types + for field in fields: + ftype, fname = field + fsize[field] = 0 + # We should add a check for p.fparticle_unions or something here + if ftype in unions: + for pt in unions[ftype]: + ptf[pt].append(fname) + field_maps[pt, fname].append(field) + else: + ptf[ftype].append(fname) + field_maps[field].append(field) + + # Now we allocate + psize = {dobj.ptype: dobj.particle_number} + for field in fields: + if field[0] in unions: + for pt in unions[field[0]]: + fsize[field] += psize.get(pt, 0) + else: + fsize[field] += psize.get(field[0], 0) + for field in fields: + if field[1] in self._vector_fields: + shape = (fsize[field], self._vector_fields[field[1]]) + elif field[1] in self._array_fields: + shape = (fsize[field],)+self._array_fields[field[1]] + elif field in self.ds.scalar_field_list: + shape = (1,) + else: + shape = (fsize[field], ) + rv[field] = np.empty(shape, dtype="float64") + ind[field] = 0 + # Now we read. + for field_r, vals in self._read_particle_fields(dobj, ptf): + # Note that we now need to check the mappings + for field_f in field_maps[field_r]: + my_ind = ind[field_f] + rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals + ind[field_f] += vals.shape[0] + # Now we need to truncate all our fields, since we allow for + # over-estimating. 
+ for field_f in ind: + rv[field_f] = rv[field_f][:ind[field_f]] + return rv + + def _read_scalar_fields(self, dobj, scalar_fields): + all_data = {} + if not scalar_fields: return all_data + pcount = 1 + with h5py.File(dobj.scalar_data_file.filename, "r") as f: + for ptype, field_list in sorted(scalar_fields.items()): + for field in field_list: + if field == "particle_identifier": + field_data = \ + np.arange(dobj.scalar_data_file.total_particles[ptype]) + \ + dobj.scalar_data_file.index_start[ptype] + elif field in f[ptype]: + field_data = f[ptype][field][()].astype("float64") + else: + fname = field[:field.rfind("_")] + field_data = f[ptype][fname][()].astype("float64") + my_div = field_data.size / pcount + if my_div > 1: + findex = int(field[field.rfind("_") + 1:]) + field_data = field_data[:, findex] + data = np.array([field_data[dobj.scalar_index]]) + all_data[(ptype, field)] = data + return all_data + + def _read_member_fields(self, dobj, member_fields): + all_data = defaultdict(lambda: np.empty(dobj.particle_number, + dtype=np.float64)) + if not member_fields: return all_data + field_start = 0 + for i, data_file in enumerate(dobj.field_data_files): + start_index = dobj.field_data_start[i] + end_index = dobj.field_data_end[i] + pcount = end_index - start_index + if pcount == 0: continue + field_end = field_start + end_index - start_index + with h5py.File(data_file.filename, "r") as f: + for ptype, field_list in sorted(member_fields.items()): + for field in field_list: + field_data = all_data[(ptype, field)] + if field in f["IDs"]: + my_data = \ + f["IDs"][field][start_index:end_index].astype("float64") + else: + fname = field[:field.rfind("_")] + my_data = \ + f["IDs"][fname][start_index:end_index].astype("float64") + my_div = my_data.size / pcount + if my_div > 1: + findex = int(field[field.rfind("_") + 1:]) + my_data = my_data[:, findex] + field_data[field_start:field_end] = my_data + field_start = field_end + return all_data + + def _read_particle_fields(self, dobj, ptf): + # separate member particle fields from scalar fields + scalar_fields = defaultdict(list) + member_fields = defaultdict(list) + for ptype, field_list in sorted(ptf.items()): + for field in field_list: + if (ptype, field) in self.ds.scalar_field_list: + scalar_fields[ptype].append(field) + else: + member_fields[ptype].append(field) + + all_data = self._read_scalar_fields(dobj, scalar_fields) + all_data.update(self._read_member_fields(dobj, member_fields)) + + for field, field_data in all_data.items(): + yield field, field_data + + def _identify_fields(self, data_file): + with h5py.File(data_file.filename, "r") as f: + scalar_fields = [("halos", field) for field in f + if not isinstance(f[field], h5py.Group)] + units = dict([(("halos", field), + parse_h5_attr(f[field], "units")) + for field in f]) + if 'particles' in f: + id_fields = [('halos', field) for field in f['particles']] + else: + id_fields = [] + + return scalar_fields+id_fields, scalar_fields, id_fields, units From c284fa09095487a88edb6cc0ba07179c15ec6046 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 6 Apr 2020 16:40:07 +0100 Subject: [PATCH 225/653] Implement io. 
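
This patch reworks the halo-container io around the simpler on-disk layout used by the generic halo_catalog frontend: per-halo scalar values are read straight from flat datasets (f[field][scalar_index]) and member particle data is sliced out of a "particles" group using the halo's "particle_index_start" and "particle_number" fields. Below is a minimal, standalone sketch of reading one halo's members from a file with that layout; the helper and its name are illustrative only and not part of yt, though the dataset names mirror the fields referenced in this patch.

    import h5py

    def read_halo_members(filename, halo_index, field):
        # Assumes flat per-halo scalar datasets plus a "particles" group,
        # as in the layout this io handler targets.
        with h5py.File(filename, "r") as f:
            start = int(f["particle_index_start"][halo_index])
            count = int(f["particle_number"][halo_index])
            return f["particles"][field][start:start + count].astype("float64")
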
--- yt/frontends/halo_catalog/data_structures.py | 106 +++---------------- yt/frontends/halo_catalog/io.py | 30 +----- 2 files changed, 21 insertions(+), 115 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index d4245e96799..92db5c40330 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -208,22 +208,7 @@ def _calculate_particle_index_starts(self): for ptype in self.ds.particle_types_raw]) def _create_halo_id_table(self): - """ - Create a list of halo start ids so we know which file - contains particles for a given halo. Note, the halo ids - are distributed over all files and so the ids for a given - halo are likely stored in a different file than the halo - itself. - """ - - self._halo_id_number = np.array([data_file.total_ids - for data_file in self.data_files]) - self._halo_id_end = self._halo_id_number.cumsum() - self._halo_id_start = self._halo_id_end - self._halo_id_number - - self._group_length_sum = \ - np.array([data_file.group_length_sum - for data_file in self.data_files]) + pass def _detect_output_fields(self): field_list = [] @@ -305,7 +290,7 @@ def _get_halo_values(self, ptype, identifiers, fields, for field in fields: data[field][target] = \ - my_f[os.path.join(ptype, field)][()][scalar_indices[target]] + my_f[field][scalar_indices[target]] if self.data_files[i_scalar].filename != filename: my_f.close() @@ -465,86 +450,27 @@ def __init__(self, ptype, particle_identifier, ds=None): self.scalar_index = self.index._get_halo_scalar_index( ptype, self.particle_identifier) - halo_fields = ["%sLen" % ptype] - if ptype == "Subhalo": halo_fields.append("SubhaloGrNr") + halo_fields = ['particle_number', + 'particle_index_start'] my_data = self.index._get_halo_values( ptype, np.array([self.particle_identifier]), halo_fields) - self.particle_number = np.int64(my_data["%sLen" % ptype][0]) - - if ptype == "Group": - self.group_identifier = self.particle_identifier - id_offset = 0 - # index of file that has scalar values for the group - g_scalar = i_scalar - group_index = self.scalar_index + self.particle_number = np.int64(my_data['particle_number'][0]) - # If a subhalo, find the index of the parent. - elif ptype == "Subhalo": - self.group_identifier = np.int64(my_data["SubhaloGrNr"][0]) + self.group_identifier = self.particle_identifier + id_offset = 0 + # index of file that has scalar values for the group + g_scalar = i_scalar + group_index = self.scalar_index - # Find the file that has the scalar values for the parent group. - g_scalar = self.index._get_halo_file_indices( - "Group", [self.group_identifier])[0] - - # index within halo arrays that corresponds to the paent group - group_index = self.index._get_halo_scalar_index( - "Group", self.group_identifier) - - my_data = self.index._get_halo_values( - "Group", np.array([self.group_identifier]), - ["GroupNsubs", "GroupFirstSub"]) - self.subgroup_identifier = self.particle_identifier - \ - np.int64(my_data["GroupFirstSub"][0]) - parent_subhalos = my_data["GroupNsubs"][0] - - mylog.debug("Subhalo %d is subgroup %s of %d in group %d." 
% \ - (self.particle_identifier, self.subgroup_identifier, - parent_subhalos, self.group_identifier)) - - # ids of the sibling subhalos that come before this one - if self.subgroup_identifier > 0: - sub_ids = np.arange( - self.particle_identifier - self.subgroup_identifier, - self.particle_identifier) - my_data = self.index._get_halo_values( - "Subhalo", sub_ids, ["SubhaloLen"]) - id_offset = my_data["SubhaloLen"].sum(dtype=np.int64) - else: - id_offset = 0 - - # Calculate the starting index for the member particles. - # First, add up all the particles in the earlier files. - all_id_start = self.index._group_length_sum[:g_scalar].sum(dtype=np.int64) - - # Now add the halos in this file that come before. - with h5py.File(self.index.data_files[g_scalar].filename, "r") as f: - all_id_start += f["Group"]["GroupLen"][:group_index].sum(dtype=np.int64) - - # Add the subhalo offset. - all_id_start += id_offset - - # indices of first and last files containing member particles - i_start = np.digitize([all_id_start], - self.index._halo_id_start, - right=False)[0] - 1 - i_end = np.digitize([all_id_start+self.particle_number], - self.index._halo_id_end, - right=True)[0] - self.field_data_files = self.index.data_files[i_start:i_end+1] + # Data files containing particles belonging to this halo. + self.field_data_files = [self.index.data_files[i_scalar]] # starting and ending indices for each file containing particles - self.field_data_start = \ - (all_id_start - - self.index._halo_id_start[i_start:i_end+1]).clip(min=0) - self.field_data_start = self.field_data_start.astype(np.int64) - self.field_data_end = \ - (all_id_start + self.particle_number - - self.index._halo_id_start[i_start:i_end+1]).clip( - max=self.index._halo_id_number[i_start:i_end+1]) - self.field_data_end = self.field_data_end.astype(np.int64) - - for attr in ["mass", "position", "velocity"]: + self.field_data_start = [np.int64(my_data['particle_index_start'][0])] + self.field_data_end = [self.field_data_start[0] + self.particle_number] + + for attr in ["mass", "position"]:#, "velocity"]: setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) def __repr__(self): diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 8731795293c..6c4123c016d 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -1,3 +1,5 @@ +from collections import \ + defaultdict import numpy as np from yt.funcs import mylog, parse_h5_attr @@ -178,20 +180,7 @@ def _read_scalar_fields(self, dobj, scalar_fields): with h5py.File(dobj.scalar_data_file.filename, "r") as f: for ptype, field_list in sorted(scalar_fields.items()): for field in field_list: - if field == "particle_identifier": - field_data = \ - np.arange(dobj.scalar_data_file.total_particles[ptype]) + \ - dobj.scalar_data_file.index_start[ptype] - elif field in f[ptype]: - field_data = f[ptype][field][()].astype("float64") - else: - fname = field[:field.rfind("_")] - field_data = f[ptype][fname][()].astype("float64") - my_div = field_data.size / pcount - if my_div > 1: - findex = int(field[field.rfind("_") + 1:]) - field_data = field_data[:, findex] - data = np.array([field_data[dobj.scalar_index]]) + data = np.array([f[field][dobj.scalar_index]]).astype("float64") all_data[(ptype, field)] = data return all_data @@ -210,17 +199,8 @@ def _read_member_fields(self, dobj, member_fields): for ptype, field_list in sorted(member_fields.items()): for field in field_list: field_data = all_data[(ptype, field)] - if field in f["IDs"]: - my_data = \ - 
f["IDs"][field][start_index:end_index].astype("float64") - else: - fname = field[:field.rfind("_")] - my_data = \ - f["IDs"][fname][start_index:end_index].astype("float64") - my_div = my_data.size / pcount - if my_div > 1: - findex = int(field[field.rfind("_") + 1:]) - my_data = my_data[:, findex] + my_data = \ + f['particles'][field][start_index:end_index].astype("float64") field_data[field_start:field_end] = my_data field_start = field_end return all_data From 43574142ed5e8a3abf0074bddcb0c027228760ab Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 10:59:19 +0100 Subject: [PATCH 226/653] Create HaloDatasetIOHandler class. --- yt/frontends/halo_catalog/io.py | 53 +++++++++++++++++++-------------- 1 file changed, 31 insertions(+), 22 deletions(-) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 6c4123c016d..32517c86a92 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -114,12 +114,31 @@ def _identify_fields(self, data_file): for field in f]) return fields, units -class IOHandlerHaloCatalogHaloHDF5(IOHandlerHaloCatalogHDF5): - _dataset_type = "halo_catalog_halo_hdf5" +class HaloDatasetIOHandler(IOHandlerHaloCatalogHDF5): + """ + Base class for io handlers to load halo member particles. + """ def _read_particle_coords(self, chunks, ptf): pass + def _read_particle_fields(self, dobj, ptf): + # separate member particle fields from scalar fields + scalar_fields = defaultdict(list) + member_fields = defaultdict(list) + for ptype, field_list in sorted(ptf.items()): + for field in field_list: + if (ptype, field) in self.ds.scalar_field_list: + scalar_fields[ptype].append(field) + else: + member_fields[ptype].append(field) + + all_data = self._read_scalar_fields(dobj, scalar_fields) + all_data.update(self._read_member_fields(dobj, member_fields)) + + for field, field_data in all_data.items(): + yield field, field_data + def _read_particle_selection(self, dobj, fields): rv = {} ind = {} @@ -173,9 +192,14 @@ def _read_particle_selection(self, dobj, fields): rv[field_f] = rv[field_f][:ind[field_f]] return rv + +class IOHandlerHaloCatalogHaloHDF5(HaloDatasetIOHandler): + _dataset_type = "halo_catalog_halo_hdf5" + def _read_scalar_fields(self, dobj, scalar_fields): all_data = {} - if not scalar_fields: return all_data + if not scalar_fields: + return all_data pcount = 1 with h5py.File(dobj.scalar_data_file.filename, "r") as f: for ptype, field_list in sorted(scalar_fields.items()): @@ -187,13 +211,15 @@ def _read_scalar_fields(self, dobj, scalar_fields): def _read_member_fields(self, dobj, member_fields): all_data = defaultdict(lambda: np.empty(dobj.particle_number, dtype=np.float64)) - if not member_fields: return all_data + if not member_fields: + return all_data field_start = 0 for i, data_file in enumerate(dobj.field_data_files): start_index = dobj.field_data_start[i] end_index = dobj.field_data_end[i] pcount = end_index - start_index - if pcount == 0: continue + if pcount == 0: + continue field_end = field_start + end_index - start_index with h5py.File(data_file.filename, "r") as f: for ptype, field_list in sorted(member_fields.items()): @@ -205,23 +231,6 @@ def _read_member_fields(self, dobj, member_fields): field_start = field_end return all_data - def _read_particle_fields(self, dobj, ptf): - # separate member particle fields from scalar fields - scalar_fields = defaultdict(list) - member_fields = defaultdict(list) - for ptype, field_list in sorted(ptf.items()): - for field in field_list: - if (ptype, field) 
in self.ds.scalar_field_list: - scalar_fields[ptype].append(field) - else: - member_fields[ptype].append(field) - - all_data = self._read_scalar_fields(dobj, scalar_fields) - all_data.update(self._read_member_fields(dobj, member_fields)) - - for field, field_data in all_data.items(): - yield field, field_data - def _identify_fields(self, data_file): with h5py.File(data_file.filename, "r") as f: scalar_fields = [("halos", field) for field in f From 01ab30ebcf4dbe0f2c380083d4b5635101738579 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 11:11:43 +0100 Subject: [PATCH 227/653] Refactor IOHandlerGadgetFOFHaloHDF5 as subclass of HaloDatasetIOHandler. --- yt/frontends/gadget_fof/io.py | 160 +++++++++----------------------- yt/frontends/halo_catalog/io.py | 52 +++++------ 2 files changed, 68 insertions(+), 144 deletions(-) diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index ee621e6cb0e..acdd760bf1c 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -2,6 +2,8 @@ import numpy as np +from yt.frontends.halo_catalog.io import \ + HaloDatasetIOHandler from yt.funcs import mylog from yt.utilities.exceptions import YTDomainOverflow from yt.utilities.io_handler import BaseIOHandler @@ -188,91 +190,26 @@ def _identify_fields(self, data_file): self.offset_fields = self.offset_fields.union(set(my_offset_fields)) return fields, {} - -class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5): +class IOHandlerGadgetFOFHaloHDF5(HaloDatasetIOHandler, IOHandlerGadgetFOFHDF5): _dataset_type = "gadget_fof_halo_hdf5" - def _read_particle_coords(self, chunks, ptf): - pass - - def _read_particle_selection(self, dobj, fields): - rv = {} - ind = {} - # We first need a set of masks for each particle type - ptf = defaultdict(list) # ON-DISK TO READ - fsize = defaultdict(lambda: 0) # COUNT RV - field_maps = defaultdict(list) # ptypes -> fields - unions = self.ds.particle_unions - # What we need is a mapping from particle types to return types - for field in fields: - ftype, fname = field - fsize[field] = 0 - # We should add a check for p.fparticle_unions or something here - if ftype in unions: - for pt in unions[ftype]: - ptf[pt].append(fname) - field_maps[pt, fname].append(field) - else: - ptf[ftype].append(fname) - field_maps[field].append(field) - - # Now we allocate - psize = {dobj.ptype: dobj.particle_number} - for field in fields: - if field[0] in unions: - for pt in unions[field[0]]: - fsize[field] += psize.get(pt, 0) - else: - fsize[field] += psize.get(field[0], 0) - for field in fields: - if field[1] in self._vector_fields: - shape = (fsize[field], self._vector_fields[field[1]]) - elif field[1] in self._array_fields: - shape = (fsize[field],) + self._array_fields[field[1]] - elif field in self.ds.scalar_field_list: - shape = (1,) - else: - shape = (fsize[field],) - rv[field] = np.empty(shape, dtype="float64") - ind[field] = 0 - # Now we read. - for field_r, vals in self._read_particle_fields(dobj, ptf): - # Note that we now need to check the mappings - for field_f in field_maps[field_r]: - my_ind = ind[field_f] - rv[field_f][my_ind : my_ind + vals.shape[0], ...] = vals - ind[field_f] += vals.shape[0] - # Now we need to truncate all our fields, since we allow for - # over-estimating. 
- for field_f in ind: - rv[field_f] = rv[field_f][: ind[field_f]] - return rv + def _identify_fields(self, data_file): + fields = [] + scalar_fields = [] + id_fields = {} + with h5py.File(data_file.filename, "r") as f: + for ptype in self.ds.particle_types_raw: + fields.append((ptype, "particle_identifier")) + scalar_fields.append((ptype, "particle_identifier")) + my_fields, my_offset_fields = \ + subfind_field_list(f[ptype], ptype, data_file.total_particles) + fields.extend(my_fields) + scalar_fields.extend(my_fields) - def _read_scalar_fields(self, dobj, scalar_fields): - all_data = {} - if not scalar_fields: - return all_data - pcount = 1 - with h5py.File(dobj.scalar_data_file.filename, mode="r") as f: - for ptype, field_list in sorted(scalar_fields.items()): - for field in field_list: - if field == "particle_identifier": - field_data = ( - np.arange(dobj.scalar_data_file.total_particles[ptype]) - + dobj.scalar_data_file.index_start[ptype] - ) - elif field in f[ptype]: - field_data = f[ptype][field][()].astype("float64") - else: - fname = field[: field.rfind("_")] - field_data = f[ptype][fname][()].astype("float64") - my_div = field_data.size / pcount - if my_div > 1: - findex = int(field[field.rfind("_") + 1 :]) - field_data = field_data[:, findex] - data = np.array([field_data[dobj.scalar_index]]) - all_data[(ptype, field)] = data - return all_data + if "IDs" not in f: continue + id_fields = [(ptype, field) for field in f["IDs"]] + fields.extend(id_fields) + return fields, scalar_fields, id_fields, {} def _read_member_fields(self, dobj, member_fields): all_data = defaultdict(lambda: np.empty(dobj.particle_number, dtype=np.float64)) @@ -307,42 +244,29 @@ def _read_member_fields(self, dobj, member_fields): field_start = field_end return all_data - def _read_particle_fields(self, dobj, ptf): - # separate member particle fields from scalar fields - scalar_fields = defaultdict(list) - member_fields = defaultdict(list) - for ptype, field_list in sorted(ptf.items()): - for field in field_list: - if (ptype, field) in self.ds.scalar_field_list: - scalar_fields[ptype].append(field) - else: - member_fields[ptype].append(field) - - all_data = self._read_scalar_fields(dobj, scalar_fields) - all_data.update(self._read_member_fields(dobj, member_fields)) - - for field, field_data in all_data.items(): - yield field, field_data - - def _identify_fields(self, data_file): - fields = [] - scalar_fields = [] - id_fields = {} - with h5py.File(data_file.filename, mode="r") as f: - for ptype in self.ds.particle_types_raw: - fields.append((ptype, "particle_identifier")) - scalar_fields.append((ptype, "particle_identifier")) - my_fields, my_offset_fields = subfind_field_list( - f[ptype], ptype, data_file.total_particles - ) - fields.extend(my_fields) - scalar_fields.extend(my_fields) - - if "IDs" not in f: - continue - id_fields = [(ptype, field) for field in f["IDs"]] - fields.extend(id_fields) - return fields, scalar_fields, id_fields, {} + def _read_scalar_fields(self, dobj, scalar_fields): + all_data = {} + if not scalar_fields: return all_data + pcount = 1 + with h5py.File(dobj.scalar_data_file.filename, "r") as f: + for ptype, field_list in sorted(scalar_fields.items()): + for field in field_list: + if field == "particle_identifier": + field_data = \ + np.arange(dobj.scalar_data_file.total_particles[ptype]) + \ + dobj.scalar_data_file.index_start[ptype] + elif field in f[ptype]: + field_data = f[ptype][field][()].astype("float64") + else: + fname = field[:field.rfind("_")] + field_data = 
f[ptype][fname][()].astype("float64") + my_div = field_data.size / pcount + if my_div > 1: + findex = int(field[field.rfind("_") + 1:]) + field_data = field_data[:, findex] + data = np.array([field_data[dobj.scalar_index]]) + all_data[(ptype, field)] = data + return all_data def subfind_field_list(fh, ptype, pcount): diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 32517c86a92..2da384936de 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -114,7 +114,7 @@ def _identify_fields(self, data_file): for field in f]) return fields, units -class HaloDatasetIOHandler(IOHandlerHaloCatalogHDF5): +class HaloDatasetIOHandler(): """ Base class for io handlers to load halo member particles. """ @@ -193,20 +193,22 @@ def _read_particle_selection(self, dobj, fields): return rv -class IOHandlerHaloCatalogHaloHDF5(HaloDatasetIOHandler): +class IOHandlerHaloCatalogHaloHDF5(HaloDatasetIOHandler, IOHandlerHaloCatalogHDF5): _dataset_type = "halo_catalog_halo_hdf5" - def _read_scalar_fields(self, dobj, scalar_fields): - all_data = {} - if not scalar_fields: - return all_data - pcount = 1 - with h5py.File(dobj.scalar_data_file.filename, "r") as f: - for ptype, field_list in sorted(scalar_fields.items()): - for field in field_list: - data = np.array([f[field][dobj.scalar_index]]).astype("float64") - all_data[(ptype, field)] = data - return all_data + def _identify_fields(self, data_file): + with h5py.File(data_file.filename, "r") as f: + scalar_fields = [("halos", field) for field in f + if not isinstance(f[field], h5py.Group)] + units = dict([(("halos", field), + parse_h5_attr(f[field], "units")) + for field in f]) + if 'particles' in f: + id_fields = [('halos', field) for field in f['particles']] + else: + id_fields = [] + + return scalar_fields+id_fields, scalar_fields, id_fields, units def _read_member_fields(self, dobj, member_fields): all_data = defaultdict(lambda: np.empty(dobj.particle_number, @@ -231,16 +233,14 @@ def _read_member_fields(self, dobj, member_fields): field_start = field_end return all_data - def _identify_fields(self, data_file): - with h5py.File(data_file.filename, "r") as f: - scalar_fields = [("halos", field) for field in f - if not isinstance(f[field], h5py.Group)] - units = dict([(("halos", field), - parse_h5_attr(f[field], "units")) - for field in f]) - if 'particles' in f: - id_fields = [('halos', field) for field in f['particles']] - else: - id_fields = [] - - return scalar_fields+id_fields, scalar_fields, id_fields, units + def _read_scalar_fields(self, dobj, scalar_fields): + all_data = {} + if not scalar_fields: + return all_data + pcount = 1 + with h5py.File(dobj.scalar_data_file.filename, "r") as f: + for ptype, field_list in sorted(scalar_fields.items()): + for field in field_list: + data = np.array([f[field][dobj.scalar_index]]).astype("float64") + all_data[(ptype, field)] = data + return all_data From b8311b00323dea1ad64a275b1b1d5634ffe0d2bf Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 11:29:29 +0100 Subject: [PATCH 228/653] Create HaloDatasetParticleIndex class. 
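
The shared halo-index bookkeeping (per-file index starts, particle counts, locating a halo's file with np.digitize, field detection) moves out of HaloCatalogHaloParticleIndex into a new HaloDatasetParticleIndex base class so other frontends can reuse it. The sketch below is a toy illustration of that extraction pattern with made-up names, not the actual yt classes: shared lookup logic lives in the base class, while the file-format-specific read stays in the subclass.

    import numpy as np

    class BaseHaloIndex:
        def setup(self, halos_per_file):
            # shared: cumulative starting index of halos stored in each file
            self._index_start = np.cumsum([0] + list(halos_per_file))[:-1]

        def file_for_halo(self, halo_id):
            # shared: which file holds this halo's scalar values
            return np.digitize([halo_id], self._index_start, right=False)[0] - 1

    class FrontendHaloIndex(BaseHaloIndex):
        def read_scalar(self, fh, field, index):
            # frontend-specific: how a field is laid out inside one file
            return fh[field][index]
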
--- yt/frontends/halo_catalog/data_structures.py | 81 +++++++++++--------- yt/frontends/halo_catalog/io.py | 2 - 2 files changed, 43 insertions(+), 40 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 92db5c40330..f045dd45453 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -157,39 +157,14 @@ def _is_valid(self, *args, **kwargs): return True return False -class HaloCatalogHaloParticleIndex(ParticleIndex): +class HaloDatasetParticleIndex(ParticleIndex): + """ + Base class for particle index objects that read halo member particles. + """ + def __init__(self, ds, dataset_type): self.real_ds = weakref.proxy(ds.real_ds) - super(HaloCatalogHaloParticleIndex, self).__init__(ds, dataset_type) - - def _setup_data_io(self): - super(HaloCatalogHaloParticleIndex, self)._setup_data_io() - self._setup_filenames() - - def _setup_geometry(self): - self._setup_data_io() - - if self.real_ds._instantiated_index is None: - template = self.real_ds.filename_template - ndoms = self.real_ds.file_count - cls = self.real_ds._file_class - self.data_files = \ - [cls(self.dataset, self.io, template % {'num':i}, i, None) - for i in range(ndoms)] - else: - self.data_files = self.real_ds.index.data_files - - self._calculate_particle_index_starts() - self._calculate_particle_count() - self._create_halo_id_table() - - def _calculate_particle_count(self): - """ - Calculate the total number of each type of particle. - """ - self.particle_count = \ - dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) - for ptype in self.ds.particle_types_raw]) + super(HaloDatasetParticleIndex, self).__init__(ds, dataset_type) def _calculate_particle_index_starts(self): """ @@ -207,6 +182,14 @@ def _calculate_particle_index_starts(self): for data_file in self.data_files])) for ptype in self.ds.particle_types_raw]) + def _calculate_particle_count(self): + """ + Calculate the total number of each type of particle. 
+ """ + self.particle_count = \ + dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) + for ptype in self.ds.particle_types_raw]) + def _create_halo_id_table(self): pass @@ -240,6 +223,15 @@ def _detect_output_fields(self): ds.field_units.update(units) ds.particle_types_raw = ds.particle_types + def _get_halo_file_indices(self, ptype, identifiers): + return np.digitize(identifiers, + self._halo_index_start[ptype], right=False) - 1 + + def _get_halo_scalar_index(self, ptype, identifier): + i_scalar = self._get_halo_file_indices(ptype, [identifier])[0] + scalar_index = identifier - self._halo_index_start[ptype][i_scalar] + return scalar_index + def _identify_base_chunk(self, dobj): pass @@ -252,14 +244,27 @@ def _read_particle_fields(self, fields, dobj, chunk = None): dobj, fields_to_read) return fields_to_return, fields_to_generate - def _get_halo_file_indices(self, ptype, identifiers): - return np.digitize(identifiers, - self._halo_index_start[ptype], right=False) - 1 + def _setup_geometry(self): + self._setup_data_io() - def _get_halo_scalar_index(self, ptype, identifier): - i_scalar = self._get_halo_file_indices(ptype, [identifier])[0] - scalar_index = identifier - self._halo_index_start[ptype][i_scalar] - return scalar_index + if self.real_ds._instantiated_index is None: + template = self.real_ds.filename_template + ndoms = self.real_ds.file_count + cls = self.real_ds._file_class + self.data_files = \ + [cls(self.dataset, self.io, template % {'num':i}, i, None) + for i in range(ndoms)] + else: + self.data_files = self.real_ds.index.data_files + + self._calculate_particle_index_starts() + self._calculate_particle_count() + self._create_halo_id_table() + +class HaloCatalogHaloParticleIndex(HaloDatasetParticleIndex): + def _setup_data_io(self): + super(HaloCatalogHaloParticleIndex, self)._setup_data_io() + self._setup_filenames() def _get_halo_values(self, ptype, identifiers, fields, f=None): diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 2da384936de..011e1c489e0 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -192,7 +192,6 @@ def _read_particle_selection(self, dobj, fields): rv[field_f] = rv[field_f][:ind[field_f]] return rv - class IOHandlerHaloCatalogHaloHDF5(HaloDatasetIOHandler, IOHandlerHaloCatalogHDF5): _dataset_type = "halo_catalog_halo_hdf5" @@ -237,7 +236,6 @@ def _read_scalar_fields(self, dobj, scalar_fields): all_data = {} if not scalar_fields: return all_data - pcount = 1 with h5py.File(dobj.scalar_data_file.filename, "r") as f: for ptype, field_list in sorted(scalar_fields.items()): for field in field_list: From a734ea0fa3d3a368179d0fa43bbfa65506486398 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 13:28:02 +0100 Subject: [PATCH 229/653] Make functions the same. 
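
The generic _calculate_particle_index_starts now also tracks a running offset and records data_file.offset_start, matching the gadget_fof version; getattr(data_file, "total_offset", 0) lets file classes that never define total_offset contribute zero, so one loop serves both frontends. A toy example of that pattern (not yt code):

    class DataFile:
        # stand-in: only some file classes define total_offset
        def __init__(self, n_halos, total_offset=None):
            self.total_particles = {"halos": n_halos}
            if total_offset is not None:
                self.total_offset = total_offset

    offset_count = 0
    for data_file in [DataFile(10), DataFile(5, total_offset=120)]:
        data_file.offset_start = offset_count
        offset_count += getattr(data_file, "total_offset", 0)  # 0 when absent
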
--- yt/frontends/gadget_fof/data_structures.py | 32 +++++++++++++------- yt/frontends/halo_catalog/data_structures.py | 3 ++ 2 files changed, 24 insertions(+), 11 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 81019371995..ef975823a73 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -5,17 +5,27 @@ import numpy as np -from yt.data_objects.data_containers import YTSelectionContainer -from yt.data_objects.static_output import ParticleDataset -from yt.frontends.gadget.data_structures import _fix_unit_ordering -from yt.frontends.gadget_fof.fields import GadgetFOFFieldInfo, GadgetFOFHaloFieldInfo -from yt.frontends.halo_catalog.data_structures import HaloCatalogFile -from yt.funcs import only_on_root, setdefaultattr -from yt.geometry.particle_geometry_handler import ParticleIndex -from yt.utilities.cosmology import Cosmology -from yt.utilities.logger import ytLogger as mylog -from yt.utilities.on_demand_imports import _h5py as h5py - +from yt.data_objects.data_containers import \ + YTSelectionContainer +from yt.data_objects.static_output import \ + ParticleDataset +from yt.frontends.gadget.data_structures import \ + _fix_unit_ordering +from yt.frontends.gadget_fof.fields import \ + GadgetFOFFieldInfo, \ + GadgetFOFHaloFieldInfo +from yt.frontends.halo_catalog.data_structures import \ + HaloCatalogFile, \ + HaloDatasetParticleIndex +from yt.funcs import \ + only_on_root, \ + setdefaultattr +from yt.geometry.particle_geometry_handler import \ + ParticleIndex +from yt.utilities.cosmology import \ + Cosmology +from yt.utilities.logger import ytLogger as \ + mylog class GadgetFOFParticleIndex(ParticleIndex): def _calculate_particle_count(self): diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index f045dd45453..2845de3a7ff 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -171,11 +171,14 @@ def _calculate_particle_index_starts(self): Create a dict of halo id offsets for each file. """ particle_count = defaultdict(int) + offset_count = 0 for data_file in self.data_files: data_file.index_start = dict([(ptype, particle_count[ptype]) for ptype in data_file.total_particles]) + data_file.offset_start = offset_count for ptype in data_file.total_particles: particle_count[ptype] += data_file.total_particles[ptype] + offset_count += getattr(data_file, "total_offset", 0) self._halo_index_start = \ dict([(ptype, np.array([data_file.index_start[ptype] From 47096b16145589c352dcb90a9481cdbdb4b2dc6e Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 14:11:48 +0100 Subject: [PATCH 230/653] Implement two more index base classes. 
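
With GadgetFOFHaloParticleIndex now inheriting from both GadgetFOFParticleIndex and HaloDatasetParticleIndex, two parents can provide the same method; the explicit class-attribute assignments (_detect_output_fields from one parent, _setup_data_io from the other) state which implementation wins instead of leaning on MRO order. A toy illustration of that selection mechanism, using made-up class names:

    class A:
        def setup(self):
            return "A.setup"

    class B:
        def setup(self):
            return "B.setup"

    class C(A, B):
        # MRO alone would resolve to A.setup; this pins the other parent's version
        setup = B.setup

    assert C().setup() == "B.setup"
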
--- yt/frontends/gadget_fof/data_structures.py | 103 ++----------------- yt/frontends/halo_catalog/data_structures.py | 29 +++--- 2 files changed, 24 insertions(+), 108 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index ef975823a73..52b97fd5b05 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -16,6 +16,7 @@ GadgetFOFHaloFieldInfo from yt.frontends.halo_catalog.data_structures import \ HaloCatalogFile, \ + HaloCatalogParticleIndex, \ HaloDatasetParticleIndex from yt.funcs import \ only_on_root, \ @@ -27,44 +28,7 @@ from yt.utilities.logger import ytLogger as \ mylog -class GadgetFOFParticleIndex(ParticleIndex): - def _calculate_particle_count(self): - """ - Calculate the total number of each type of particle. - """ - self.particle_count = dict( - [ - (ptype, sum([d.total_particles[ptype] for d in self.data_files])) - for ptype in self.ds.particle_types_raw - ] - ) - - def _calculate_particle_index_starts(self): - # Halo indices are not saved in the file, so we must count by hand. - # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc. - particle_count = defaultdict(int) - offset_count = 0 - for data_file in self.data_files: - data_file.index_start = dict( - [(ptype, particle_count[ptype]) for ptype in data_file.total_particles] - ) - data_file.offset_start = offset_count - for ptype in data_file.total_particles: - particle_count[ptype] += data_file.total_particles[ptype] - offset_count += data_file.total_offset - - self._halo_index_start = dict( - [ - ( - ptype, - np.array( - [data_file.index_start[ptype] for data_file in self.data_files] - ), - ) - for ptype in self.ds.particle_types_raw - ] - ) - +class GadgetFOFParticleIndex(HaloCatalogParticleIndex): def _calculate_file_offset_map(self): # After the FOF is performed, a load-balancing step redistributes halos # and then writes more fields. 
Here, for each file, we create a list of @@ -333,15 +297,9 @@ def _is_valid(self, *args, **kwargs): pass return valid - -class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex): - def __init__(self, ds, dataset_type): - self.real_ds = weakref.proxy(ds.real_ds) - super(GadgetFOFHaloParticleIndex, self).__init__(ds, dataset_type) - - def _setup_data_io(self): - super(GadgetFOFHaloParticleIndex, self)._setup_data_io() - self._create_halo_id_table() +class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex, HaloDatasetParticleIndex): + _detect_output_fields = HaloDatasetParticleIndex._detect_output_fields + _setup_data_io = GadgetFOFParticleIndex._setup_data_io def _create_halo_id_table(self): """ @@ -362,55 +320,8 @@ def _create_halo_id_table(self): [data_file.group_length_sum for data_file in self.data_files] ) - def _detect_output_fields(self): - field_list = [] - scalar_field_list = [] - units = {} - found_fields = dict( - [(ptype, False) for ptype, pnum in self.particle_count.items() if pnum > 0] - ) - has_ids = False - - for data_file in self.data_files: - fl, sl, idl, _units = self.io._identify_fields(data_file) - units.update(_units) - field_list.extend([f for f in fl if f not in field_list]) - scalar_field_list.extend([f for f in sl if f not in scalar_field_list]) - for ptype in found_fields: - found_fields[ptype] |= data_file.total_particles[ptype] - has_ids |= len(idl) > 0 - if all(found_fields.values()) and has_ids: - break - - self.field_list = field_list - self.scalar_field_list = scalar_field_list - ds = self.dataset - ds.scalar_field_list = scalar_field_list - ds.particle_types = tuple(set(pt for pt, ds in field_list)) - ds.field_units.update(units) - ds.particle_types_raw = ds.particle_types - - def _identify_base_chunk(self, dobj): - pass - - def _read_particle_fields(self, fields, dobj, chunk=None): - if len(fields) == 0: - return {}, [] - fields_to_read, fields_to_generate = self._split_fields(fields) - if len(fields_to_read) == 0: - return {}, fields_to_generate - fields_to_return = self.io._read_particle_selection(dobj, fields_to_read) - return fields_to_return, fields_to_generate - - def _get_halo_file_indices(self, ptype, identifiers): - return np.digitize(identifiers, self._halo_index_start[ptype], right=False) - 1 - - def _get_halo_scalar_index(self, ptype, identifier): - i_scalar = self._get_halo_file_indices(ptype, [identifier])[0] - scalar_index = identifier - self._halo_index_start[ptype][i_scalar] - return scalar_index - - def _get_halo_values(self, ptype, identifiers, fields, f=None): + def _get_halo_values(self, ptype, identifiers, fields, + f=None): """ Get field values for halos. IDs are likely to be sequential (or at least monotonic), but not necessarily diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 2845de3a7ff..bd5101b0122 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -157,14 +157,18 @@ def _is_valid(self, *args, **kwargs): return True return False -class HaloDatasetParticleIndex(ParticleIndex): +class HaloCatalogParticleIndex(ParticleIndex): """ - Base class for particle index objects that read halo member particles. + Base class for halo catalog datasets. """ - def __init__(self, ds, dataset_type): - self.real_ds = weakref.proxy(ds.real_ds) - super(HaloDatasetParticleIndex, self).__init__(ds, dataset_type) + def _calculate_particle_count(self): + """ + Calculate the total number of each type of particle. 
+ """ + self.particle_count = \ + dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) + for ptype in self.ds.particle_types_raw]) def _calculate_particle_index_starts(self): """ @@ -185,13 +189,14 @@ def _calculate_particle_index_starts(self): for data_file in self.data_files])) for ptype in self.ds.particle_types_raw]) - def _calculate_particle_count(self): - """ - Calculate the total number of each type of particle. - """ - self.particle_count = \ - dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) - for ptype in self.ds.particle_types_raw]) +class HaloDatasetParticleIndex(HaloCatalogParticleIndex): + """ + Base class for particle index objects that read halo member particles. + """ + + def __init__(self, ds, dataset_type): + self.real_ds = weakref.proxy(ds.real_ds) + super(HaloDatasetParticleIndex, self).__init__(ds, dataset_type) def _create_halo_id_table(self): pass From 85aeae7b81fad6de0a7fddfb436e662dcf4cae70 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 14:19:22 +0100 Subject: [PATCH 231/653] Abstract _get_halo_values. --- yt/frontends/gadget_fof/data_structures.py | 39 +---------- yt/frontends/halo_catalog/data_structures.py | 72 +++++++++++--------- 2 files changed, 40 insertions(+), 71 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 52b97fd5b05..51e1409e1ad 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -320,43 +320,8 @@ def _create_halo_id_table(self): [data_file.group_length_sum for data_file in self.data_files] ) - def _get_halo_values(self, ptype, identifiers, fields, - f=None): - """ - Get field values for halos. IDs are likely to be - sequential (or at least monotonic), but not necessarily - all within the same file. - - This does not do much to minimize file i/o, but with - halos randomly distributed across files, there's not - much more we can do. - """ - - # if a file is already open, don't open it again - filename = None if f is None else f.filename - - data = defaultdict(lambda: np.empty(identifiers.size)) - i_scalars = self._get_halo_file_indices(ptype, identifiers) - for i_scalar in np.unique(i_scalars): - target = i_scalars == i_scalar - scalar_indices = identifiers - self._halo_index_start[ptype][i_scalar] - - # only open file if it's not already open - my_f = ( - f - if self.data_files[i_scalar].filename == filename - else h5py.File(self.data_files[i_scalar].filename, mode="r") - ) - - for field in fields: - data[field][target] = my_f[os.path.join(ptype, field)][()][ - scalar_indices[target] - ] - - if self.data_files[i_scalar].filename != filename: - my_f.close() - - return data + def _read_halo_particle_field(self, fh, ptype, field, indices): + return fh[os.path.join(ptype, field)][indices] class GadgetFOFHaloDataset(ParticleDataset): diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index bd5101b0122..d334cc610e8 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -240,6 +240,42 @@ def _get_halo_scalar_index(self, ptype, identifier): scalar_index = identifier - self._halo_index_start[ptype][i_scalar] return scalar_index + def _get_halo_values(self, ptype, identifiers, fields, + f=None): + """ + Get field values for halos. IDs are likely to be + sequential (or at least monotonic), but not necessarily + all within the same file. 
+ + This does not do much to minimize file i/o, but with + halos randomly distributed across files, there's not + much more we can do. + """ + + # if a file is already open, don't open it again + filename = None if f is None \ + else f.filename + + data = defaultdict(lambda: np.empty(identifiers.size)) + i_scalars = self._get_halo_file_indices(ptype, identifiers) + for i_scalar in np.unique(i_scalars): + target = i_scalars == i_scalar + scalar_indices = identifiers - \ + self._halo_index_start[ptype][i_scalar] + + # only open file if it's not already open + my_f = f if self.data_files[i_scalar].filename == filename \ + else h5py.File(self.data_files[i_scalar].filename, "r") + + for field in fields: + data[field][target] = \ + self._read_halo_particle_field( + my_f, ptype, field, scalar_indices[target]) + + if self.data_files[i_scalar].filename != filename: my_f.close() + + return data + def _identify_base_chunk(self, dobj): pass @@ -274,40 +310,8 @@ def _setup_data_io(self): super(HaloCatalogHaloParticleIndex, self)._setup_data_io() self._setup_filenames() - def _get_halo_values(self, ptype, identifiers, fields, - f=None): - """ - Get field values for halos. IDs are likely to be - sequential (or at least monotonic), but not necessarily - all within the same file. - - This does not do much to minimize file i/o, but with - halos randomly distributed across files, there's not - much more we can do. - """ - - # if a file is already open, don't open it again - filename = None if f is None \ - else f.filename - - data = defaultdict(lambda: np.empty(identifiers.size)) - i_scalars = self._get_halo_file_indices(ptype, identifiers) - for i_scalar in np.unique(i_scalars): - target = i_scalars == i_scalar - scalar_indices = identifiers - \ - self._halo_index_start[ptype][i_scalar] - - # only open file if it's not already open - my_f = f if self.data_files[i_scalar].filename == filename \ - else h5py.File(self.data_files[i_scalar].filename, "r") - - for field in fields: - data[field][target] = \ - my_f[field][scalar_indices[target]] - - if self.data_files[i_scalar].filename != filename: my_f.close() - - return data + def _read_halo_particle_field(self, fh, ptype, field, indices): + return fh[field][indices] class HaloCatalogHaloDataset(ParticleDataset): _index_class = HaloCatalogHaloParticleIndex From a0c22c5b3f8db29853bcdf58d3804764b1d6c2c9 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 14:33:16 +0100 Subject: [PATCH 232/653] Implement GadgetFOFHaloDataset as HaloDataset subclass. 
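
GadgetFOFHaloDataset drops its bespoke wrapper logic and inherits the new HaloDataset base, which acts as a thin proxy around the real dataset: it copies the handful of attributes it needs (filename_template, file_count, the particle type tuples, periodicity) and borrows units rather than re-parsing anything. A minimal sketch of that forwarding pattern with toy classes, not the yt implementations:

    class RealDataset:
        filename_template = "halos_%(num)04i.h5"   # illustrative values only
        file_count = 4
        particle_types_raw = ("halos",)

    class HaloView:
        _forwarded = ("filename_template", "file_count", "particle_types_raw")

        def __init__(self, real_ds):
            self.real_ds = real_ds
            for attr in self._forwarded:
                setattr(self, attr, getattr(real_ds, attr))

    assert HaloView(RealDataset()).file_count == 4
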
--- yt/frontends/gadget_fof/data_structures.py | 64 ++------------------ yt/frontends/halo_catalog/data_structures.py | 18 +++--- 2 files changed, 15 insertions(+), 67 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 51e1409e1ad..98842ea368a 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -17,7 +17,8 @@ from yt.frontends.halo_catalog.data_structures import \ HaloCatalogFile, \ HaloCatalogParticleIndex, \ - HaloDatasetParticleIndex + HaloDatasetParticleIndex, \ + HaloDataset from yt.funcs import \ only_on_root, \ setdefaultattr @@ -323,70 +324,13 @@ def _create_halo_id_table(self): def _read_halo_particle_field(self, fh, ptype, field, indices): return fh[os.path.join(ptype, field)][indices] - -class GadgetFOFHaloDataset(ParticleDataset): +class GadgetFOFHaloDataset(HaloDataset): _index_class = GadgetFOFHaloParticleIndex _file_class = GadgetFOFHDF5File _field_info_class = GadgetFOFHaloFieldInfo def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"): - self.real_ds = ds - for attr in [ - "filename_template", - "file_count", - "particle_types_raw", - "particle_types", - "periodicity", - ]: - setattr(self, attr, getattr(self.real_ds, attr)) - - super(GadgetFOFHaloDataset, self).__init__( - self.real_ds.parameter_filename, dataset_type - ) - - @classmethod - def _is_valid(self, *args, **kwargs): - # This class is not meant to be instanciated by yt.load() - return False - - def print_key_parameters(self): - pass - - def _set_derived_attrs(self): - pass - - def _parse_parameter_file(self): - for attr in [ - "cosmological_simulation", - "cosmology", - "current_redshift", - "current_time", - "dimensionality", - "domain_dimensions", - "domain_left_edge", - "domain_right_edge", - "domain_width", - "hubble_constant", - "omega_lambda", - "omega_matter", - "unique_identifier", - ]: - setattr(self, attr, getattr(self.real_ds, attr)) - - def set_code_units(self): - self._set_code_unit_attributes() - self.unit_registry = self.real_ds.unit_registry - - def _set_code_unit_attributes(self): - for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: - my_unit = "%s_unit" % unit - setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) - - def __repr__(self): - return "%s" % self.real_ds - - def _setup_classes(self): - self.objects = [] + super(GadgetFOFHaloDataset, self).__init__(ds, dataset_type) class GadgetFOFHaloContainer(YTSelectionContainer): """ diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index d334cc610e8..b055476fcdc 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -313,19 +313,15 @@ def _setup_data_io(self): def _read_halo_particle_field(self, fh, ptype, field, indices): return fh[field][indices] -class HaloCatalogHaloDataset(ParticleDataset): - _index_class = HaloCatalogHaloParticleIndex - _file_class = HaloCatalogHDF5File - _field_info_class = HaloCatalogHaloFieldInfo - - def __init__(self, ds, dataset_type="halo_catalog_halo_hdf5"): +class HaloDataset(ParticleDataset): + def __init__(self, ds, dataset_type): self.real_ds = ds for attr in ['filename_template', 'file_count', 'particle_types_raw', 'particle_types', 'periodicity']: setattr(self, attr, getattr(self.real_ds, attr)) - super(HaloCatalogHaloDataset, self).__init__( + super(HaloDataset, self).__init__( self.real_ds.parameter_filename, dataset_type) def 
print_key_parameters(self): @@ -357,6 +353,14 @@ def __repr__(self): def _setup_classes(self): self.objects = [] +class HaloCatalogHaloDataset(HaloDataset): + _index_class = HaloCatalogHaloParticleIndex + _file_class = HaloCatalogHDF5File + _field_info_class = HaloCatalogHaloFieldInfo + + def __init__(self, ds, dataset_type="halo_catalog_halo_hdf5"): + super(HaloCatalogHaloDataset, self).__init__(ds, dataset_type) + class HaloCatalogHaloContainer(YTSelectionContainer): """ Create a data container to get member particles and individual From 8bda4a560646c794b9411432a6ff77a7246817b2 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 15:19:29 +0100 Subject: [PATCH 233/653] Add HaloContainer base class. --- yt/frontends/halo_catalog/data_structures.py | 67 ++++++++++---------- 1 file changed, 33 insertions(+), 34 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index b055476fcdc..648b432e233 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -361,7 +361,7 @@ class HaloCatalogHaloDataset(HaloDataset): def __init__(self, ds, dataset_type="halo_catalog_halo_hdf5"): super(HaloCatalogHaloDataset, self).__init__(ds, dataset_type) -class HaloCatalogHaloContainer(YTSelectionContainer): +class HaloContainer(YTSelectionContainer): """ Create a data container to get member particles and individual values from halos and subhalos. Halo mass, position, and @@ -435,6 +435,7 @@ class HaloCatalogHaloContainer(YTSelectionContainer): _type_name = "halo" _con_args = ("ptype", "particle_identifier") + _skip_add = True _spatial = False def __init__(self, ptype, particle_identifier, ds=None): @@ -444,56 +445,54 @@ def __init__(self, ptype, particle_identifier, ds=None): self.ptype = ptype self._current_particle_type = ptype - super(HaloCatalogHaloContainer, self).__init__(ds, {}) - - if ptype == "Subhalo" and isinstance(particle_identifier, tuple): - self.group_identifier, self.subgroup_identifier = \ - particle_identifier - my_data = self.index._get_halo_values( - "Group", np.array([self.group_identifier]), - ["GroupFirstSub"]) - self.particle_identifier = \ - np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier) - else: - self.particle_identifier = particle_identifier + super(HaloContainer, self).__init__(ds, {}) - if self.particle_identifier >= self.index.particle_count[ptype]: - raise RuntimeError("%s %d requested, but only %d %s objects exist." % - (ptype, particle_identifier, - self.index.particle_count[ptype], ptype)) + self._set_identifiers(particle_identifier) # Find the file that has the scalar values for this halo. i_scalar = self.index._get_halo_file_indices( ptype, [self.particle_identifier])[0] self.scalar_data_file = self.index.data_files[i_scalar] + # Data files containing particles belonging to this halo. 
+ self.field_data_files = [self.index.data_files[i_scalar]] + # index within halo arrays that corresponds to this halo self.scalar_index = self.index._get_halo_scalar_index( ptype, self.particle_identifier) - halo_fields = ['particle_number', - 'particle_index_start'] - my_data = self.index._get_halo_values( - ptype, np.array([self.particle_identifier]), - halo_fields) - self.particle_number = np.int64(my_data['particle_number'][0]) - - self.group_identifier = self.particle_identifier - id_offset = 0 - # index of file that has scalar values for the group - g_scalar = i_scalar - group_index = self.scalar_index - - # Data files containing particles belonging to this halo. - self.field_data_files = [self.index.data_files[i_scalar]] + self._set_io_data() + self.particle_number = self._get_particle_number() # starting and ending indices for each file containing particles - self.field_data_start = [np.int64(my_data['particle_index_start'][0])] - self.field_data_end = [self.field_data_start[0] + self.particle_number] + self._set_field_indices() for attr in ["mass", "position"]:#, "velocity"]: setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) + def _set_io_data(self): + halo_fields = self._get_member_fieldnames() + my_data = self.index._get_halo_values( + self.ptype, np.array([self.particle_identifier]), + halo_fields) + self._io_data = dict((field, np.int64(val[0])) + for field, val in my_data.items()) + def __repr__(self): return "%s_%s_%09d" % \ (self.ds, self.ptype, self.particle_identifier) + +class HaloCatalogHaloContainer(HaloContainer): + def _get_member_fieldnames(self): + return ['particle_number', 'particle_index_start'] + + def _get_particle_number(self): + return self._io_data['particle_number'] + + def _set_field_indices(self): + self.field_data_start = [self._io_data['particle_index_start']] + self.field_data_end = [self.field_data_start[0] + self.particle_number] + + def _set_identifiers(self, particle_identifier): + self.particle_identifier = particle_identifier + self.group_identifier = self.particle_identifier From 672ea2014a52a889e135942af302871e1ce30606 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 15:49:31 +0100 Subject: [PATCH 234/653] Implement GadgetFOFHaloContainer as subclass of HaloContainer. --- yt/frontends/gadget_fof/data_structures.py | 157 +++---------------- yt/frontends/halo_catalog/data_structures.py | 1 + 2 files changed, 26 insertions(+), 132 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 98842ea368a..bda81dfcd47 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -17,6 +17,7 @@ from yt.frontends.halo_catalog.data_structures import \ HaloCatalogFile, \ HaloCatalogParticleIndex, \ + HaloContainer, \ HaloDatasetParticleIndex, \ HaloDataset from yt.funcs import \ @@ -332,141 +333,27 @@ class GadgetFOFHaloDataset(HaloDataset): def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"): super(GadgetFOFHaloDataset, self).__init__(ds, dataset_type) -class GadgetFOFHaloContainer(YTSelectionContainer): - """ - Create a data container to get member particles and individual - values from halos and subhalos. Halo mass, position, and - velocity are set as attributes. Halo IDs are accessible - through the field, "member_ids". Other fields that are one - value per halo are accessible as normal. The field list for - halo objects can be seen in `ds.halos_field_list`. 
- - Parameters - ---------- - ptype : string - The type of halo, either "Group" for the main halo or - "Subhalo" for subhalos. - particle_identifier : int or tuple of ints - The halo or subhalo id. If requesting a subhalo, the id - can also be given as a tuple of the main halo id and - subgroup id, such as (1, 4) for subgroup 4 of halo 1. - - Attributes - ---------- - particle_identifier : int - The id of the halo or subhalo. - group_identifier : int - For subhalos, the id of the enclosing halo. - subgroup_identifier : int - For subhalos, the relative id of the subhalo within - the enclosing halo. - particle_number : int - Number of particles in the halo. - mass : float - Halo mass. - position : array of floats - Halo position. - velocity : array of floats - Halo velocity. - - Note - ---- - Relevant Fields: - - * particle_number - number of particles - * subhalo_number - number of subhalos - * group_identifier - id of parent group for subhalos - - Examples - -------- - - >>> import yt - >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5") - >>> - >>> halo = ds.halo("Group", 0) - >>> print(halo.mass) - 13256.5517578 code_mass - >>> print(halo.position) - [ 16.18603706 6.95965052 12.52694607] code_length - >>> print(halo.velocity) - [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s - >>> print(halo["Group_R_Crit200"]) - [ 0.79668683] code_length - >>> - >>> # particle ids for this halo - >>> print(halo["member_ids"]) - [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless - >>> - >>> # get the first subhalo of this halo - >>> subhalo = ds.halo("Subhalo", (0, 0)) - >>> print(subhalo["member_ids"]) - [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless - - """ - - _type_name = "halo" - _con_args = ("ptype", "particle_identifier") - _spatial = False - # Do not register it to prevent .halo from being attached to all datasets - _skip_add = True - - def __init__(self, ptype, particle_identifier, ds=None): - if ptype not in ds.particle_types_raw: - raise RuntimeError( - 'Possible halo types are %s, supplied "%s".' - % (ds.particle_types_raw, ptype) - ) - - self.ptype = ptype - self._current_particle_type = ptype - super(GadgetFOFHaloContainer, self).__init__(ds, {}) - - if ptype == "Subhalo" and isinstance(particle_identifier, tuple): - self.group_identifier, self.subgroup_identifier = particle_identifier - my_data = self.index._get_halo_values( - "Group", np.array([self.group_identifier]), ["GroupFirstSub"] - ) - self.particle_identifier = np.int64( - my_data["GroupFirstSub"][0] + self.subgroup_identifier - ) - else: - self.particle_identifier = particle_identifier - - if self.particle_identifier >= self.index.particle_count[ptype]: - raise RuntimeError( - "%s %d requested, but only %d %s objects exist." - % (ptype, particle_identifier, self.index.particle_count[ptype], ptype) - ) - - # Find the file that has the scalar values for this halo. 
- i_scalar = self.index._get_halo_file_indices(ptype, [self.particle_identifier])[ - 0 - ] - self.scalar_data_file = self.index.data_files[i_scalar] - - # index within halo arrays that corresponds to this halo - self.scalar_index = self.index._get_halo_scalar_index( - ptype, self.particle_identifier - ) - - halo_fields = ["%sLen" % ptype] - if ptype == "Subhalo": +class GadgetFOFHaloContainer(HaloContainer): + def _get_member_fieldnames(self): + halo_fields = ["%sLen" % self.ptype] + if self.ptype == "Subhalo": halo_fields.append("SubhaloGrNr") - my_data = self.index._get_halo_values( - ptype, np.array([self.particle_identifier]), halo_fields - ) - self.particle_number = np.int64(my_data["%sLen" % ptype][0]) + return halo_fields + + def _get_particle_number(self): + return self._io_data["%sLen" % self.ptype] - if ptype == "Group": + def _set_field_indices(self): + if self.ptype == "Group": self.group_identifier = self.particle_identifier id_offset = 0 # index of file that has scalar values for the group - g_scalar = i_scalar + g_scalar = self.i_scalar group_index = self.scalar_index # If a subhalo, find the index of the parent. - elif ptype == "Subhalo": - self.group_identifier = np.int64(my_data["SubhaloGrNr"][0]) + elif self.ptype == "Subhalo": + self.group_identifier = self._io_data["SubhaloGrNr"] # Find the file that has the scalar values for the parent group. g_scalar = self.index._get_halo_file_indices( @@ -543,8 +430,14 @@ def __init__(self, ptype, particle_identifier, ds=None): ).clip(max=self.index._halo_id_number[i_start : i_end + 1]) self.field_data_end = self.field_data_end.astype(np.int64) - for attr in ["mass", "position", "velocity"]: - setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) - - def __repr__(self): - return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier) + def _set_identifiers(self, particle_identifier): + if self.ptype == "Subhalo" and isinstance(particle_identifier, tuple): + self.group_identifier, self.subgroup_identifier = \ + particle_identifier + my_data = self.index._get_halo_values( + "Group", np.array([self.group_identifier]), + ["GroupFirstSub"]) + self.particle_identifier = \ + np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier) + else: + self.particle_identifier = particle_identifier diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 648b432e233..51e6652f8d6 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -452,6 +452,7 @@ def __init__(self, ptype, particle_identifier, ds=None): # Find the file that has the scalar values for this halo. i_scalar = self.index._get_halo_file_indices( ptype, [self.particle_identifier])[0] + self.i_scalar = i_scalar self.scalar_data_file = self.index.data_files[i_scalar] # Data files containing particles belonging to this halo. From d8054c21f8025d90193af228ac0c2473fa0a49b6 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 16:06:58 +0100 Subject: [PATCH 235/653] Refactor mass, position, velocity attributes as properties. 
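
A short usage sketch of what this change means for callers (illustration
only, not part of the patch; the dataset path is the tiny_fof_halos sample
used by the tests added later in this series and is assumed to be available
locally):

    import yt

    ds = yt.load("tiny_fof_halos/DD0046/DD0046.0.h5")
    halo = ds.halo("halos", 0)
    # mass/position/velocity are now lazy properties: the field is read
    # on first access and the cached value is reused afterwards.
    print(halo.mass)
    print(halo.position)
    print(halo.velocity)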
--- yt/frontends/halo_catalog/data_structures.py | 22 ++++++++++++++++++-- 1 file changed, 20 insertions(+), 2 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 51e6652f8d6..7963ed23b03 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -468,8 +468,26 @@ def __init__(self, ptype, particle_identifier, ds=None): # starting and ending indices for each file containing particles self._set_field_indices() - for attr in ["mass", "position"]:#, "velocity"]: - setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) + _mass = None + @property + def mass(self): + if self._mass is None: + self._mass = self[self.ptype, "particle_mass"][0] + return self._mass + + _position = None + @property + def position(self): + if self._position is None: + self._position = self[self.ptype, "particle_position"][0] + return self._position + + _velocity = None + @property + def velocity(self): + if self._velocity is None: + self._velocity = self[self.ptype, "particle_velocity"][0] + return self._velocity def _set_io_data(self): halo_fields = self._get_member_fieldnames() From d713133e0192a7011ceda2fb1e580ed97d64e7ba Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 7 Apr 2020 16:09:56 +0100 Subject: [PATCH 236/653] Update docstring. --- yt/frontends/halo_catalog/data_structures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 7963ed23b03..c4e02ea03ae 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -373,8 +373,8 @@ class HaloContainer(YTSelectionContainer): Parameters ---------- ptype : string - The type of halo, either "Group" for the main halo or - "Subhalo" for subhalos. + The type of halo. Possible options can be found by + inspecting the value of ds.particle_types_raw. particle_identifier : int or tuple of ints The halo or subhalo id. If requesting a subhalo, the id can also be given as a tuple of the main halo id and From 59ea181190eb30f247097de894316799bdf3fdaa Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 10 Jul 2020 15:45:34 +0100 Subject: [PATCH 237/653] Move HaloDatasetParticleIndex to AdaptaHOPParticleIndex since it was the only instance. 
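
For reference, a minimal sketch of how the filename template expansion in
the new AdaptaHOPParticleIndex._setup_filenames behaves; the template and
file count below are hypothetical and chosen only to show the pattern (the
real values come from dataset.filename_template and dataset.file_count):

    template = "halos_%(num)d.h5"   # hypothetical template
    ndoms = 3                       # hypothetical file count
    filenames = [template % {"num": i} for i in range(ndoms)]
    print(filenames)  # ['halos_0.h5', 'halos_1.h5', 'halos_2.h5']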
--- yt/frontends/adaptahop/data_structures.py | 37 +++++++++++++++++------ 1 file changed, 27 insertions(+), 10 deletions(-) diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 87ebddad7a9..00c0c39028a 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -11,24 +11,41 @@ import re import stat -import numpy as np - -from yt.data_objects.data_containers import YTSelectionContainer -from yt.data_objects.static_output import Dataset -from yt.frontends.halo_catalog.data_structures import ( - HaloCatalogFile, - HaloCatalogParticleIndex, -) -from yt.funcs import setdefaultattr +from yt.data_objects.data_containers import \ + YTSelectionContainer +from yt.data_objects.static_output import \ + Dataset +from yt.frontends.halo_catalog.data_structures import \ + HaloCatalogFile +from yt.funcs import \ + setdefaultattr +from yt.geometry.particle_geometry_handler import \ + ParticleIndex +from yt.utilities.cython_fortran_utils import FortranFile from yt.units import Mpc from yt.utilities.cython_fortran_utils import FortranFile from .definitions import HEADER_ATTRIBUTES from .fields import AdaptaHOPFieldInfo +class AdaptaHOPParticleIndex(ParticleIndex): + def _setup_filenames(self): + template = self.dataset.filename_template + ndoms = self.dataset.file_count + cls = self.dataset._file_class + if ndoms > 1: + self.data_files = \ + [cls(self.dataset, self.io, template % {'num':i}, i, range=None) + for i in range(ndoms)] + else: + self.data_files = \ + [cls(self.dataset, self.io, + self.dataset.parameter_filename, 0, range=None)] + self.total_particles = sum( + sum(d.total_particles.values()) for d in self.data_files) class AdaptaHOPDataset(Dataset): - _index_class = HaloCatalogParticleIndex + _index_class = AdaptaHOPParticleIndex _file_class = HaloCatalogFile _field_info_class = AdaptaHOPFieldInfo From 8d79c0fdcc3b2e5965cd31d6104a361a8cdd9f84 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 10 Jul 2020 15:48:28 +0100 Subject: [PATCH 238/653] Fix flake8 errors. 
--- yt/frontends/gadget_fof/data_structures.py | 9 ++------- yt/frontends/halo_catalog/data_structures.py | 1 - 2 files changed, 2 insertions(+), 8 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index bda81dfcd47..7c1130e1ff8 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -1,12 +1,9 @@ -import os -import weakref -from collections import defaultdict from functools import partial import numpy as np +import stat +import os -from yt.data_objects.data_containers import \ - YTSelectionContainer from yt.data_objects.static_output import \ ParticleDataset from yt.frontends.gadget.data_structures import \ @@ -23,8 +20,6 @@ from yt.funcs import \ only_on_root, \ setdefaultattr -from yt.geometry.particle_geometry_handler import \ - ParticleIndex from yt.utilities.cosmology import \ Cosmology from yt.utilities.logger import ytLogger as \ diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index c4e02ea03ae..ba3c3c45080 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -3,7 +3,6 @@ import glob from yt.utilities.on_demand_imports import _h5py as h5py import numpy as np -import os import weakref from .fields import \ From c29e880ae48da28921215e004f645336d52df63f Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 10 Jul 2020 16:35:40 +0100 Subject: [PATCH 239/653] Add tests for halo container. --- .../halo_catalog/tests/test_outputs.py | 53 ++++++++++++++++--- 1 file changed, 47 insertions(+), 6 deletions(-) diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index 75383a43abf..819fa23d062 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -1,11 +1,23 @@ import numpy as np -from yt.convenience import load as yt_load -from yt.frontends.halo_catalog.data_structures import HaloCatalogDataset -from yt.frontends.ytdata.utilities import save_as_dataset -from yt.testing import TempDirTest, assert_array_equal, requires_module -from yt.units.yt_array import YTArray, YTQuantity - +from yt.convenience import \ + load as yt_load +from yt.frontends.halo_catalog.data_structures import \ + HaloCatalogDataset +from yt.frontends.ytdata.utilities import \ + save_as_dataset +from yt.testing import \ + assert_allclose_units, \ + assert_array_equal, \ + assert_equal, \ + requires_file, \ + requires_module, \ + TempDirTest +from yt.units.yt_array import \ + YTArray, \ + YTQuantity +from yt.utilities.answer_testing.framework import \ + data_dir_load def fake_halo_catalog(data): filename = "catalog.0.h5" @@ -86,3 +98,32 @@ def test_halo_catalog_boundary_particles(self): f2 = ds.r[field].in_base() f2.sort() assert_array_equal(f1, f2) + +t46 = "tiny_fof_halos/DD0046/DD0046.0.h5" +@requires_file(t46) +@requires_module('h5py') +def test_halo_quantities(): + ds = data_dir_load(t46) + ad = ds.all_data() + for i in range(ds.index.total_particles): + hid = int(ad['halos', 'particle_identifier'][i]) + halo = ds.halo('halos', hid) + for field in ['mass', 'position', 'velocity']: + v1 = ad['halos', 'particle_%s' % field][i] + v2 = getattr(halo, field) + assert_allclose_units( + v1, v2, rtol=1e-15, + err_msg='Halo %d %s field mismatch.' 
% (hid, field)) + +t46 = "tiny_fof_halos/DD0046/DD0046.0.h5" +@requires_file(t46) +@requires_module('h5py') +def test_halo_particles(): + ds = data_dir_load(t46) + i = ds.r['halos', 'particle_mass'].argmax() + hid = int(ds.r['halos', 'particle_identifier'][i]) + halo = ds.halo('halos', hid) + ids = halo['halos', 'member_ids'] + assert_equal(ids.size, 420) + assert_equal(ids.min(), 19478.) + assert_equal(ids.max(), 31669.) From 3bf02cb99eda3f3b7757285c2b9bd5b9b0161dce Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 10 Jul 2020 16:35:57 +0100 Subject: [PATCH 240/653] Update deprecated h5py syntax. --- yt/frontends/halo_catalog/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 011e1c489e0..097f580b633 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -40,7 +40,7 @@ def _yield_coordinates(self, data_file): with h5py.File(data_file.filename, "r") as f: units = parse_h5_attr(f[pn % "x"], "units") x, y, z = ( - self.ds.arr(f[pn % ax].value.astype("float64"), units) for ax in "xyz" + self.ds.arr(f[pn % ax][()].astype("float64"), units) for ax in "xyz" ) pos = uvstack([x, y, z]).T pos.convert_to_units("code_length") From 4946c449bdde202b0e773805d56011826e132f82 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Sat, 11 Jul 2020 13:25:28 +0100 Subject: [PATCH 241/653] Restore gadget_fof frontend from master. --- yt/frontends/gadget_fof/data_structures.py | 326 ++++++++++++++++++--- yt/frontends/gadget_fof/io.py | 161 +++++++--- 2 files changed, 403 insertions(+), 84 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 7c1130e1ff8..661dad579da 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -1,9 +1,12 @@ +from collections import defaultdict from functools import partial import numpy as np -import stat import os +import weakref +from yt.data_objects.data_containers import \ + YTSelectionContainer from yt.data_objects.static_output import \ ParticleDataset from yt.frontends.gadget.data_structures import \ @@ -12,20 +15,44 @@ GadgetFOFFieldInfo, \ GadgetFOFHaloFieldInfo from yt.frontends.halo_catalog.data_structures import \ - HaloCatalogFile, \ - HaloCatalogParticleIndex, \ - HaloContainer, \ - HaloDatasetParticleIndex, \ - HaloDataset + HaloCatalogFile from yt.funcs import \ only_on_root, \ setdefaultattr +from yt.geometry.particle_geometry_handler import \ + ParticleIndex from yt.utilities.cosmology import \ Cosmology from yt.utilities.logger import ytLogger as \ mylog -class GadgetFOFParticleIndex(HaloCatalogParticleIndex): +class GadgetFOFParticleIndex(ParticleIndex): + def _calculate_particle_count(self): + """ + Calculate the total number of each type of particle. + """ + self.particle_count = \ + dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) + for ptype in self.ds.particle_types_raw]) + + def _calculate_particle_index_starts(self): + # Halo indices are not saved in the file, so we must count by hand. + # File 0 has halos 0 to N_0 - 1, file 1 has halos N_0 to N_0 + N_1 - 1, etc. 
+ particle_count = defaultdict(int) + offset_count = 0 + for data_file in self.data_files: + data_file.index_start = dict([(ptype, particle_count[ptype]) for + ptype in data_file.total_particles]) + data_file.offset_start = offset_count + for ptype in data_file.total_particles: + particle_count[ptype] += data_file.total_particles[ptype] + offset_count += data_file.total_offset + + self._halo_index_start = \ + dict([(ptype, np.array([data_file.index_start[ptype] + for data_file in self.data_files])) + for ptype in self.ds.particle_types_raw]) + def _calculate_file_offset_map(self): # After the FOF is performed, a load-balancing step redistributes halos # and then writes more fields. Here, for each file, we create a list of @@ -294,9 +321,10 @@ def _is_valid(self, *args, **kwargs): pass return valid -class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex, HaloDatasetParticleIndex): - _detect_output_fields = HaloDatasetParticleIndex._detect_output_fields - _setup_data_io = GadgetFOFParticleIndex._setup_data_io +class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex): + def __init__(self, ds, dataset_type): + self.real_ds = weakref.proxy(ds.real_ds) + super(GadgetFOFHaloParticleIndex, self).__init__(ds, dataset_type) def _create_halo_id_table(self): """ @@ -317,38 +345,265 @@ def _create_halo_id_table(self): [data_file.group_length_sum for data_file in self.data_files] ) - def _read_halo_particle_field(self, fh, ptype, field, indices): - return fh[os.path.join(ptype, field)][indices] + def _detect_output_fields(self): + field_list = [] + scalar_field_list = [] + units = {} + found_fields = \ + dict([(ptype, False) + for ptype, pnum in self.particle_count.items() + if pnum > 0]) + has_ids = False + + for data_file in self.data_files: + fl, sl, idl, _units = self.io._identify_fields(data_file) + units.update(_units) + field_list.extend([f for f in fl + if f not in field_list]) + scalar_field_list.extend([f for f in sl + if f not in scalar_field_list]) + for ptype in found_fields: + found_fields[ptype] |= data_file.total_particles[ptype] + has_ids |= len(idl) > 0 + if all(found_fields.values()) and has_ids: break + + self.field_list = field_list + self.scalar_field_list = scalar_field_list + ds = self.dataset + ds.scalar_field_list = scalar_field_list + ds.particle_types = tuple(set(pt for pt, ds in field_list)) + ds.field_units.update(units) + ds.particle_types_raw = ds.particle_types + + def _identify_base_chunk(self, dobj): + pass + + def _read_particle_fields(self, fields, dobj, chunk = None): + if len(fields) == 0: return {}, [] + fields_to_read, fields_to_generate = self._split_fields(fields) + if len(fields_to_read) == 0: + return {}, fields_to_generate + fields_to_return = self.io._read_particle_selection( + dobj, fields_to_read) + return fields_to_return, fields_to_generate + + def _get_halo_file_indices(self, ptype, identifiers): + return np.digitize(identifiers, + self._halo_index_start[ptype], right=False) - 1 + + def _get_halo_scalar_index(self, ptype, identifier): + i_scalar = self._get_halo_file_indices(ptype, [identifier])[0] + scalar_index = identifier - self._halo_index_start[ptype][i_scalar] + return scalar_index + + def _get_halo_values(self, ptype, identifiers, fields, + f=None): + """ + Get field values for halos. IDs are likely to be + sequential (or at least monotonic), but not necessarily + all within the same file. + + This does not do much to minimize file i/o, but with + halos randomly distributed across files, there's not + much more we can do. 
+ """ + + # if a file is already open, don't open it again + filename = None if f is None \ + else f.filename -class GadgetFOFHaloDataset(HaloDataset): + data = defaultdict(lambda: np.empty(identifiers.size)) + i_scalars = self._get_halo_file_indices(ptype, identifiers) + for i_scalar in np.unique(i_scalars): + target = i_scalars == i_scalar + scalar_indices = identifiers - \ + self._halo_index_start[ptype][i_scalar] + + # only open file if it's not already open + my_f = f if self.data_files[i_scalar].filename == filename \ + else h5py.File(self.data_files[i_scalar].filename, mode="r") + + for field in fields: + data[field][target] = \ + my_f[os.path.join(ptype, field)][()][scalar_indices[target]] + + if self.data_files[i_scalar].filename != filename: my_f.close() + + return data + +class GadgetFOFHaloDataset(ParticleDataset): _index_class = GadgetFOFHaloParticleIndex _file_class = GadgetFOFHDF5File _field_info_class = GadgetFOFHaloFieldInfo def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"): - super(GadgetFOFHaloDataset, self).__init__(ds, dataset_type) + self.real_ds = ds + for attr in ['filename_template', 'file_count', + 'particle_types_raw', 'particle_types', + 'periodicity']: + setattr(self, attr, getattr(self.real_ds, attr)) + + super(GadgetFOFHaloDataset, self).__init__( + self.real_ds.parameter_filename, dataset_type) -class GadgetFOFHaloContainer(HaloContainer): - def _get_member_fieldnames(self): - halo_fields = ["%sLen" % self.ptype] - if self.ptype == "Subhalo": - halo_fields.append("SubhaloGrNr") - return halo_fields + def print_key_parameters(self): + pass - def _get_particle_number(self): - return self._io_data["%sLen" % self.ptype] + def _set_derived_attrs(self): + pass - def _set_field_indices(self): - if self.ptype == "Group": + def _parse_parameter_file(self): + for attr in ["cosmological_simulation", "cosmology", + "current_redshift", "current_time", + "dimensionality", "domain_dimensions", + "domain_left_edge", "domain_right_edge", + "domain_width", "hubble_constant", + "omega_lambda", "omega_matter", + "unique_identifier"]: + setattr(self, attr, getattr(self.real_ds, attr)) + + def set_code_units(self): + for unit in ["length", "time", "mass", + "velocity", "magnetic", "temperature"]: + my_unit = "%s_unit" % unit + setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) + self.unit_registry = self.real_ds.unit_registry + + def __repr__(self): + return "%s" % self.real_ds + + def _setup_classes(self): + self.objects = [] + +class GadgetFOFHaloContainer(YTSelectionContainer): + """ + Create a data container to get member particles and individual + values from halos and subhalos. Halo mass, position, and + velocity are set as attributes. Halo IDs are accessible + through the field, "member_ids". Other fields that are one + value per halo are accessible as normal. The field list for + halo objects can be seen in `ds.halos_field_list`. + + Parameters + ---------- + ptype : string + The type of halo, either "Group" for the main halo or + "Subhalo" for subhalos. + particle_identifier : int or tuple of ints + The halo or subhalo id. If requesting a subhalo, the id + can also be given as a tuple of the main halo id and + subgroup id, such as (1, 4) for subgroup 4 of halo 1. + + Attributes + ---------- + particle_identifier : int + The id of the halo or subhalo. + group_identifier : int + For subhalos, the id of the enclosing halo. + subgroup_identifier : int + For subhalos, the relative id of the subhalo within + the enclosing halo. 
+ particle_number : int + Number of particles in the halo. + mass : float + Halo mass. + position : array of floats + Halo position. + velocity : array of floats + Halo velocity. + + Note + ---- + Relevant Fields: + + * particle_number - number of particles + * subhalo_number - number of subhalos + * group_identifier - id of parent group for subhalos + + Examples + -------- + + >>> import yt + >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5") + >>> + >>> halo = ds.halo("Group", 0) + >>> print(halo.mass) + 13256.5517578 code_mass + >>> print(halo.position) + [ 16.18603706 6.95965052 12.52694607] code_length + >>> print(halo.velocity) + [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s + >>> print(halo["Group_R_Crit200"]) + [ 0.79668683] code_length + >>> + >>> # particle ids for this halo + >>> print(halo["member_ids"]) + [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless + >>> + >>> # get the first subhalo of this halo + >>> subhalo = ds.halo("Subhalo", (0, 0)) + >>> print(subhalo["member_ids"]) + [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless + + """ + + _type_name = "halo" + _con_args = ("ptype", "particle_identifier") + _spatial = False + # Do not register it to prevent .halo from being attached to all datasets + _skip_add = True + + def __init__(self, ptype, particle_identifier, ds=None): + if ptype not in ds.particle_types_raw: + raise RuntimeError("Possible halo types are %s, supplied \"%s\"." % + (ds.particle_types_raw, ptype)) + + self.ptype = ptype + self._current_particle_type = ptype + super(GadgetFOFHaloContainer, self).__init__(ds, {}) + + if ptype == "Subhalo" and isinstance(particle_identifier, tuple): + self.group_identifier, self.subgroup_identifier = \ + particle_identifier + my_data = self.index._get_halo_values( + "Group", np.array([self.group_identifier]), + ["GroupFirstSub"]) + self.particle_identifier = \ + np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier) + else: + self.particle_identifier = particle_identifier + + if self.particle_identifier >= self.index.particle_count[ptype]: + raise RuntimeError("%s %d requested, but only %d %s objects exist." % + (ptype, particle_identifier, + self.index.particle_count[ptype], ptype)) + + # Find the file that has the scalar values for this halo. + i_scalar = self.index._get_halo_file_indices( + ptype, [self.particle_identifier])[0] + self.scalar_data_file = self.index.data_files[i_scalar] + + # index within halo arrays that corresponds to this halo + self.scalar_index = self.index._get_halo_scalar_index( + ptype, self.particle_identifier) + + halo_fields = ["%sLen" % ptype] + if ptype == "Subhalo": halo_fields.append("SubhaloGrNr") + my_data = self.index._get_halo_values( + ptype, np.array([self.particle_identifier]), + halo_fields) + self.particle_number = np.int64(my_data["%sLen" % ptype][0]) + + if ptype == "Group": self.group_identifier = self.particle_identifier id_offset = 0 # index of file that has scalar values for the group - g_scalar = self.i_scalar + g_scalar = i_scalar group_index = self.scalar_index # If a subhalo, find the index of the parent. - elif self.ptype == "Subhalo": - self.group_identifier = self._io_data["SubhaloGrNr"] + elif ptype == "Subhalo": + self.group_identifier = np.int64(my_data["SubhaloGrNr"][0]) # Find the file that has the scalar values for the parent group. 
g_scalar = self.index._get_halo_file_indices( @@ -425,14 +680,9 @@ def _set_field_indices(self): ).clip(max=self.index._halo_id_number[i_start : i_end + 1]) self.field_data_end = self.field_data_end.astype(np.int64) - def _set_identifiers(self, particle_identifier): - if self.ptype == "Subhalo" and isinstance(particle_identifier, tuple): - self.group_identifier, self.subgroup_identifier = \ - particle_identifier - my_data = self.index._get_halo_values( - "Group", np.array([self.group_identifier]), - ["GroupFirstSub"]) - self.particle_identifier = \ - np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier) - else: - self.particle_identifier = particle_identifier + for attr in ["mass", "position", "velocity"]: + setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) + + def __repr__(self): + return "%s_%s_%09d" % \ + (self.ds, self.ptype, self.particle_identifier) diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index acdd760bf1c..76723642c40 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -2,8 +2,6 @@ import numpy as np -from yt.frontends.halo_catalog.io import \ - HaloDatasetIOHandler from yt.funcs import mylog from yt.utilities.exceptions import YTDomainOverflow from yt.utilities.io_handler import BaseIOHandler @@ -190,26 +188,88 @@ def _identify_fields(self, data_file): self.offset_fields = self.offset_fields.union(set(my_offset_fields)) return fields, {} -class IOHandlerGadgetFOFHaloHDF5(HaloDatasetIOHandler, IOHandlerGadgetFOFHDF5): +class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5): _dataset_type = "gadget_fof_halo_hdf5" - def _identify_fields(self, data_file): - fields = [] - scalar_fields = [] - id_fields = {} - with h5py.File(data_file.filename, "r") as f: - for ptype in self.ds.particle_types_raw: - fields.append((ptype, "particle_identifier")) - scalar_fields.append((ptype, "particle_identifier")) - my_fields, my_offset_fields = \ - subfind_field_list(f[ptype], ptype, data_file.total_particles) - fields.extend(my_fields) - scalar_fields.extend(my_fields) + def _read_particle_coords(self, chunks, ptf): + pass - if "IDs" not in f: continue - id_fields = [(ptype, field) for field in f["IDs"]] - fields.extend(id_fields) - return fields, scalar_fields, id_fields, {} + def _read_particle_selection(self, dobj, fields): + rv = {} + ind = {} + # We first need a set of masks for each particle type + ptf = defaultdict(list) # ON-DISK TO READ + fsize = defaultdict(lambda: 0) # COUNT RV + field_maps = defaultdict(list) # ptypes -> fields + unions = self.ds.particle_unions + # What we need is a mapping from particle types to return types + for field in fields: + ftype, fname = field + fsize[field] = 0 + # We should add a check for p.fparticle_unions or something here + if ftype in unions: + for pt in unions[ftype]: + ptf[pt].append(fname) + field_maps[pt, fname].append(field) + else: + ptf[ftype].append(fname) + field_maps[field].append(field) + + # Now we allocate + psize = {dobj.ptype: dobj.particle_number} + for field in fields: + if field[0] in unions: + for pt in unions[field[0]]: + fsize[field] += psize.get(pt, 0) + else: + fsize[field] += psize.get(field[0], 0) + for field in fields: + if field[1] in self._vector_fields: + shape = (fsize[field], self._vector_fields[field[1]]) + elif field[1] in self._array_fields: + shape = (fsize[field],)+self._array_fields[field[1]] + elif field in self.ds.scalar_field_list: + shape = (1,) + else: + shape = (fsize[field], ) + rv[field] = np.empty(shape, 
dtype="float64") + ind[field] = 0 + # Now we read. + for field_r, vals in self._read_particle_fields(dobj, ptf): + # Note that we now need to check the mappings + for field_f in field_maps[field_r]: + my_ind = ind[field_f] + rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals + ind[field_f] += vals.shape[0] + # Now we need to truncate all our fields, since we allow for + # over-estimating. + for field_f in ind: + rv[field_f] = rv[field_f][:ind[field_f]] + return rv + + def _read_scalar_fields(self, dobj, scalar_fields): + all_data = {} + if not scalar_fields: return all_data + pcount = 1 + with h5py.File(dobj.scalar_data_file.filename, mode="r") as f: + for ptype, field_list in sorted(scalar_fields.items()): + for field in field_list: + if field == "particle_identifier": + field_data = \ + np.arange(dobj.scalar_data_file.total_particles[ptype]) + \ + dobj.scalar_data_file.index_start[ptype] + elif field in f[ptype]: + field_data = f[ptype][field][()].astype("float64") + else: + fname = field[:field.rfind("_")] + field_data = f[ptype][fname][()].astype("float64") + my_div = field_data.size / pcount + if my_div > 1: + findex = int(field[field.rfind("_") + 1:]) + field_data = field_data[:, findex] + data = np.array([field_data[dobj.scalar_index]]) + all_data[(ptype, field)] = data + return all_data def _read_member_fields(self, dobj, member_fields): all_data = defaultdict(lambda: np.empty(dobj.particle_number, dtype=np.float64)) @@ -244,29 +304,40 @@ def _read_member_fields(self, dobj, member_fields): field_start = field_end return all_data - def _read_scalar_fields(self, dobj, scalar_fields): - all_data = {} - if not scalar_fields: return all_data - pcount = 1 - with h5py.File(dobj.scalar_data_file.filename, "r") as f: - for ptype, field_list in sorted(scalar_fields.items()): - for field in field_list: - if field == "particle_identifier": - field_data = \ - np.arange(dobj.scalar_data_file.total_particles[ptype]) + \ - dobj.scalar_data_file.index_start[ptype] - elif field in f[ptype]: - field_data = f[ptype][field][()].astype("float64") - else: - fname = field[:field.rfind("_")] - field_data = f[ptype][fname][()].astype("float64") - my_div = field_data.size / pcount - if my_div > 1: - findex = int(field[field.rfind("_") + 1:]) - field_data = field_data[:, findex] - data = np.array([field_data[dobj.scalar_index]]) - all_data[(ptype, field)] = data - return all_data + def _read_particle_fields(self, dobj, ptf): + # separate member particle fields from scalar fields + scalar_fields = defaultdict(list) + member_fields = defaultdict(list) + for ptype, field_list in sorted(ptf.items()): + for field in field_list: + if (ptype, field) in self.ds.scalar_field_list: + scalar_fields[ptype].append(field) + else: + member_fields[ptype].append(field) + + all_data = self._read_scalar_fields(dobj, scalar_fields) + all_data.update(self._read_member_fields(dobj, member_fields)) + + for field, field_data in all_data.items(): + yield field, field_data + + def _identify_fields(self, data_file): + fields = [] + scalar_fields = [] + id_fields = {} + with h5py.File(data_file.filename, mode="r") as f: + for ptype in self.ds.particle_types_raw: + fields.append((ptype, "particle_identifier")) + scalar_fields.append((ptype, "particle_identifier")) + my_fields, my_offset_fields = \ + subfind_field_list(f[ptype], ptype, data_file.total_particles) + fields.extend(my_fields) + scalar_fields.extend(my_fields) + + if "IDs" not in f: continue + id_fields = [(ptype, field) for field in f["IDs"]] + fields.extend(id_fields) + 
return fields, scalar_fields, id_fields, {} def subfind_field_list(fh, ptype, pcount): @@ -302,9 +373,7 @@ def subfind_field_list(fh, ptype, pcount): fields.append(("Group", fname)) offset_fields.append(fname) else: - mylog.warning( - "Cannot add field (%s, %s) with size %d." - % (ptype, fh[field].name, fh[field].size) - ) + mylog.warning("Cannot add field (%s, %s) with size %d." % \ + (ptype, fh[field].name, fh[field].size)) continue return fields, offset_fields From 0f03ac237e9457f2f750e0553794e094bdb440d7 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 13:47:41 +0100 Subject: [PATCH 242/653] Remove unused function. --- yt/frontends/halo_catalog/io.py | 26 -------------------------- 1 file changed, 26 deletions(-) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 097f580b633..6ccc420ede3 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -72,32 +72,6 @@ def _read_particle_fields(self, chunks, ptf, selector): data = f[field][si:ei][mask].astype("float64") yield (ptype, field), data - def _initialize_index(self, data_file, regions): - pcount = data_file.header["num_halos"] - morton = np.empty(pcount, dtype="uint64") - mylog.debug( - "Initializing index % 5i (% 7i particles)", data_file.file_id, pcount - ) - ind = 0 - if pcount == 0: - return None - ptype = "halos" - with h5py.File(data_file.filename, mode="r") as f: - if not f.keys(): - return None - units = parse_h5_attr(f["particle_position_x"], "units") - pos = data_file._get_particle_positions(ptype, f=f) - pos = data_file.ds.arr(pos, units).to("code_length") - dle = self.ds.domain_left_edge.to("code_length") - dre = self.ds.domain_right_edge.to("code_length") - if np.any(pos.min(axis=0) < dle) or np.any(pos.max(axis=0) > dre): - raise YTDomainOverflow(pos.min(axis=0), pos.max(axis=0), dle, dre) - regions.add_data_file(pos, data_file.file_id) - morton[ind : ind + pos.shape[0]] = compute_morton( - pos[:, 0], pos[:, 1], pos[:, 2], dle, dre - ) - return morton - def _count_particles(self, data_file): si, ei = data_file.start, data_file.end nhalos = data_file.header["num_halos"] From 061e3daf2d987cf79d971f6760c4f7476915aff3 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 14:16:42 +0100 Subject: [PATCH 243/653] Reduce calls to _setup_filenames. --- yt/frontends/halo_catalog/data_structures.py | 2 +- yt/geometry/particle_geometry_handler.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index ba3c3c45080..da691e2ba48 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -306,8 +306,8 @@ def _setup_geometry(self): class HaloCatalogHaloParticleIndex(HaloDatasetParticleIndex): def _setup_data_io(self): + self.total_particles = self.ds.real_ds.index.total_particles super(HaloCatalogHaloParticleIndex, self)._setup_data_io() - self._setup_filenames() def _read_halo_particle_field(self, fh, ptype, field, indices): return fh[field][indices] diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 626d59ff705..0ca1ef30e13 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -47,7 +47,8 @@ def convert(self, unit): def _setup_filenames(self): if hasattr(self, "data_files"): - return + raise RuntimeError( + '_setup_filenames has already been called. 
Something is wrong.') template = self.dataset.filename_template ndoms = self.dataset.file_count From 0a958874a2470e70ce65522adb1a8796f5c7d350 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 14:32:09 +0100 Subject: [PATCH 244/653] Remove more unnecessary calls. --- yt/frontends/halo_catalog/data_structures.py | 19 +++++-------------- 1 file changed, 5 insertions(+), 14 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index da691e2ba48..82c828aae89 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -288,27 +288,18 @@ def _read_particle_fields(self, fields, dobj, chunk = None): return fields_to_return, fields_to_generate def _setup_geometry(self): - self._setup_data_io() - if self.real_ds._instantiated_index is None: - template = self.real_ds.filename_template - ndoms = self.real_ds.file_count - cls = self.real_ds._file_class - self.data_files = \ - [cls(self.dataset, self.io, template % {'num':i}, i, None) - for i in range(ndoms)] - else: - self.data_files = self.real_ds.index.data_files + self.real_ds.index + + # inherit some things from parent index + for attr in ['data_files', 'total_particles']: + setattr(self, attr, getattr(self.real_ds.index, attr)) self._calculate_particle_index_starts() self._calculate_particle_count() self._create_halo_id_table() class HaloCatalogHaloParticleIndex(HaloDatasetParticleIndex): - def _setup_data_io(self): - self.total_particles = self.ds.real_ds.index.total_particles - super(HaloCatalogHaloParticleIndex, self)._setup_data_io() - def _read_halo_particle_field(self, fh, ptype, field, indices): return fh[field][indices] From d67a5cd60d3f58f22a1101d8203cfab83dabcfee Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 14:56:55 +0100 Subject: [PATCH 245/653] Rename HaloCatalog to YTHaloCatalog to avoid confusion, hopefully. 
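
A quick interactive check of the renamed class, mirroring the assertion in
the updated tests (the dataset path is the tiny_fof_halos sample and is
assumed to be available locally):

    import yt
    from yt.frontends.halo_catalog.data_structures import YTHaloCatalogDataset

    ds = yt.load("tiny_fof_halos/DD0046/DD0046.0.h5")
    # The on-disk format is unchanged; only the frontend class names and
    # the dataset_type string ("ythalocatalog") are new.
    assert isinstance(ds, YTHaloCatalogDataset)
    print(ds.r["halos", "particle_mass"])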
--- doc/source/reference/api/api.rst | 8 +-- yt/frontends/halo_catalog/api.py | 11 ++- yt/frontends/halo_catalog/data_structures.py | 71 +++++++++---------- yt/frontends/halo_catalog/fields.py | 10 +-- yt/frontends/halo_catalog/io.py | 8 +-- .../halo_catalog/tests/test_outputs.py | 6 +- 6 files changed, 60 insertions(+), 54 deletions(-) diff --git a/doc/source/reference/api/api.rst b/doc/source/reference/api/api.rst index 8e5068ae85d..876db45afb4 100644 --- a/doc/source/reference/api/api.rst +++ b/doc/source/reference/api/api.rst @@ -318,10 +318,10 @@ Halo Catalogs ~yt.frontends.gadget_fof.io.IOHandlerGadgetFOFHaloHDF5 ~yt.frontends.gadget_fof.fields.GadgetFOFFieldInfo ~yt.frontends.gadget_fof.fields.GadgetFOFHaloFieldInfo - ~yt.frontends.halo_catalog.data_structures.HaloCatalogHDF5File - ~yt.frontends.halo_catalog.data_structures.HaloCatalogDataset - ~yt.frontends.halo_catalog.fields.HaloCatalogFieldInfo - ~yt.frontends.halo_catalog.io.IOHandlerHaloCatalogHDF5 + ~yt.frontends.halo_catalog.data_structures.YTHaloCatalogFile + ~yt.frontends.halo_catalog.data_structures.YTHaloCatalogDataset + ~yt.frontends.halo_catalog.fields.YTHaloCatalogFieldInfo + ~yt.frontends.halo_catalog.io.IOHandlerYTHaloCatalog ~yt.frontends.owls_subfind.data_structures.OWLSSubfindParticleIndex ~yt.frontends.owls_subfind.data_structures.OWLSSubfindHDF5File ~yt.frontends.owls_subfind.data_structures.OWLSSubfindDataset diff --git a/yt/frontends/halo_catalog/api.py b/yt/frontends/halo_catalog/api.py index 93deea97d16..8aa2d4204ce 100644 --- a/yt/frontends/halo_catalog/api.py +++ b/yt/frontends/halo_catalog/api.py @@ -1,3 +1,8 @@ -from .data_structures import HaloCatalogDataset -from .fields import HaloCatalogFieldInfo -from .io import IOHandlerHaloCatalogHDF5 +from .data_structures import \ + YTHaloCatalogDataset + +from .io import \ + IOHandlerYTHaloCatalog + +from .fields import \ + YTHaloCatalogFieldInfo diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 82c828aae89..dfcd6da0f13 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -6,8 +6,8 @@ import weakref from .fields import \ - HaloCatalogFieldInfo, \ - HaloCatalogHaloFieldInfo + YTHaloCatalogFieldInfo, \ + YTHaloCatalogHaloFieldInfo from yt.data_objects.data_containers import \ YTSelectionContainer @@ -24,8 +24,15 @@ validate_index_order class HaloCatalogFile(ParticleFile): - def __init__(self, ds, io, filename, file_id, range): - super(HaloCatalogFile, self).__init__(ds, io, filename, file_id, range) + """ + Base class for data files of halo catalog datasets. + + This is mainly here to correct for periodicity when + reading particle positions. + """ + def __init__(self, ds, io, filename, file_id, frange): + super(HaloCatalogFile, self).__init__( + ds, io, filename, file_id, frange) def _read_particle_positions(self, ptype, f=None): raise NotImplementedError @@ -49,17 +56,19 @@ def _get_particle_positions(self, ptype, f=None): return pos - -class HaloCatalogHDF5File(HaloCatalogFile): - def __init__(self, ds, io, filename, file_id, range): - with h5py.File(filename, "r") as f: +class YTHaloCatalogFile(HaloCatalogFile): + """ + Data file class for the YTHaloCatalogDataset. 
+ """ + def __init__(self, ds, io, filename, file_id, frange): + with h5py.File(filename, mode="r") as f: self.header = dict((field, parse_h5_attr(f, field)) \ for field in f.attrs.keys()) pids = f.get('particles/ids') self.total_ids = 0 if pids is None else pids.size self.group_length_sum = self.total_ids - super(HaloCatalogHDF5File, self).__init__( - ds, io, filename, file_id, range) + super(YTHaloCatalogFile, self).__init__( + ds, io, filename, file_id, frange) def _read_particle_positions(self, ptype, f=None): """ @@ -82,35 +91,25 @@ def _read_particle_positions(self, ptype, f=None): return pos - -class HaloCatalogDataset(SavedDataset): +class YTHaloCatalogDataset(SavedDataset): _index_class = ParticleIndex - _file_class = HaloCatalogHDF5File - _field_info_class = HaloCatalogFieldInfo + _file_class = YTHaloCatalogFile + _field_info_class = YTHaloCatalogFieldInfo _suffix = ".h5" _con_attrs = ("cosmological_simulation", "current_time", "current_redshift", "hubble_constant", "omega_matter", "omega_lambda", "domain_left_edge", "domain_right_edge") - def __init__( - self, - filename, - dataset_type="halocatalog_hdf5", - index_order=None, - units_override=None, - unit_system="cgs", - ): + def __init__(self, filename, dataset_type="ythalocatalog", + index_order=None, units_override=None, unit_system="cgs"): self.index_order = validate_index_order(index_order) - super(HaloCatalogDataset, self).__init__( - filename, - dataset_type, - units_override=units_override, - unit_system=unit_system, - ) + super(YTHaloCatalogDataset, self).__init__(filename, dataset_type, + units_override=units_override, + unit_system=unit_system) def add_field(self, *args, **kwargs): - super(HaloCatalogDataset, self).add_field(*args, **kwargs) + super(YTHaloCatalogDataset, self).add_field(*args, **kwargs) self._halos_ds.add_field(*args, **kwargs) @property @@ -129,8 +128,8 @@ def _halos_ds(self): return self._instantiated_halo_ds def _setup_classes(self): - super(HaloCatalogDataset, self)._setup_classes() - self.halo = partial(HaloCatalogHaloContainer, ds=self._halos_ds) + super(YTHaloCatalogDataset, self)._setup_classes() + self.halo = partial(YTHaloCatalogHaloContainer, ds=self._halos_ds) def _parse_parameter_file(self): self.refine_by = 2 @@ -142,7 +141,7 @@ def _parse_parameter_file(self): self.file_count = len(glob.glob(prefix + "*" + self._suffix)) self.particle_types = ("halos",) self.particle_types_raw = ("halos",) - super(HaloCatalogDataset, self)._parse_parameter_file() + super(YTHaloCatalogDataset, self)._parse_parameter_file() @classmethod def _is_valid(self, *args, **kwargs): @@ -345,10 +344,10 @@ def _setup_classes(self): class HaloCatalogHaloDataset(HaloDataset): _index_class = HaloCatalogHaloParticleIndex - _file_class = HaloCatalogHDF5File - _field_info_class = HaloCatalogHaloFieldInfo + _file_class = YTHaloCatalogFile + _field_info_class = YTHaloCatalogHaloFieldInfo - def __init__(self, ds, dataset_type="halo_catalog_halo_hdf5"): + def __init__(self, ds, dataset_type="ythalocatalog_halo"): super(HaloCatalogHaloDataset, self).__init__(ds, dataset_type) class HaloContainer(YTSelectionContainer): @@ -491,7 +490,7 @@ def __repr__(self): return "%s_%s_%09d" % \ (self.ds, self.ptype, self.particle_identifier) -class HaloCatalogHaloContainer(HaloContainer): +class YTHaloCatalogHaloContainer(HaloContainer): def _get_member_fieldnames(self): return ['particle_number', 'particle_index_start'] diff --git a/yt/frontends/halo_catalog/fields.py b/yt/frontends/halo_catalog/fields.py index b498ae74b61..4bf9f1adfc7 
100644 --- a/yt/frontends/halo_catalog/fields.py +++ b/yt/frontends/halo_catalog/fields.py @@ -17,13 +17,15 @@ ("virial_radius", (r_units, [], "Virial Radius")), ) -class HaloCatalogFieldInfo(FieldInfoContainer): - known_other_fields = () +class YTHaloCatalogFieldInfo(FieldInfoContainer): + known_other_fields = ( + ) known_particle_fields = _particle_fields -class HaloCatalogHaloFieldInfo(FieldInfoContainer): - known_other_fields = () +class YTHaloCatalogHaloFieldInfo(FieldInfoContainer): + known_other_fields = ( + ) known_particle_fields = _particle_fields + \ ( diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 6ccc420ede3..2d7289c5d17 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -10,8 +10,8 @@ from yt.utilities.on_demand_imports import _h5py as h5py -class IOHandlerHaloCatalogHDF5(BaseIOHandler): - _dataset_type = "halocatalog_hdf5" +class IOHandlerYTHaloCatalog(BaseIOHandler): + _dataset_type = "ythalocatalog" def _read_fluid_selection(self, chunks, selector, fields, size): raise NotImplementedError @@ -166,8 +166,8 @@ def _read_particle_selection(self, dobj, fields): rv[field_f] = rv[field_f][:ind[field_f]] return rv -class IOHandlerHaloCatalogHaloHDF5(HaloDatasetIOHandler, IOHandlerHaloCatalogHDF5): - _dataset_type = "halo_catalog_halo_hdf5" +class IOHandlerYTHaloCatalogHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog): + _dataset_type = "ythalocatalog_halo" def _identify_fields(self, data_file): with h5py.File(data_file.filename, "r") as f: diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index 819fa23d062..c0d314bf7fb 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -3,7 +3,7 @@ from yt.convenience import \ load as yt_load from yt.frontends.halo_catalog.data_structures import \ - HaloCatalogDataset + YTHaloCatalogDataset from yt.frontends.ytdata.utilities import \ save_as_dataset from yt.testing import \ @@ -57,7 +57,7 @@ def test_halo_catalog(self): fn = fake_halo_catalog(data) ds = yt_load(fn) - assert isinstance(ds, HaloCatalogDataset) + assert isinstance(ds, YTHaloCatalogDataset) for field in fields: f1 = data[field].in_base() @@ -90,7 +90,7 @@ def test_halo_catalog_boundary_particles(self): fn = fake_halo_catalog(data) ds = yt_load(fn) - assert isinstance(ds, HaloCatalogDataset) + assert isinstance(ds, YTHaloCatalogDataset) for field in ["particle_mass"]: f1 = data[field].in_base() From 48a095ef36d572eaa1c62ac68bd147b14b447c25 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 15:44:55 +0100 Subject: [PATCH 246/653] Fix up docstring and a couple more class names. --- yt/frontends/halo_catalog/data_structures.py | 184 +++++++++---------- 1 file changed, 84 insertions(+), 100 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index dfcd6da0f13..f55e2e1e219 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -92,6 +92,12 @@ def _read_particle_positions(self, ptype, f=None): return pos class YTHaloCatalogDataset(SavedDataset): + """ + Dataset class for halo catalogs made with yt. + + This covers yt FoF/HoP halo finders and the halo analysis + in yt_astro_analysis. 
+ """ _index_class = ParticleIndex _file_class = YTHaloCatalogFile _field_info_class = YTHaloCatalogFieldInfo @@ -124,12 +130,13 @@ def halos_derived_field_list(self): @property def _halos_ds(self): if self._instantiated_halo_ds is None: - self._instantiated_halo_ds = HaloCatalogHaloDataset(self) + self._instantiated_halo_ds = YTHaloDataset(self) return self._instantiated_halo_ds def _setup_classes(self): super(YTHaloCatalogDataset, self)._setup_classes() self.halo = partial(YTHaloCatalogHaloContainer, ds=self._halos_ds) + self.halo.__doc__ = YTHaloCatalogHaloContainer.__doc__ def _parse_parameter_file(self): self.refine_by = 2 @@ -155,18 +162,14 @@ def _is_valid(self, *args, **kwargs): return True return False -class HaloCatalogParticleIndex(ParticleIndex): +class YTHaloParticleIndex(ParticleIndex): """ - Base class for halo catalog datasets. + Particle index for getting halo particles from YTHaloCatalogDatasets. """ - def _calculate_particle_count(self): - """ - Calculate the total number of each type of particle. - """ - self.particle_count = \ - dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) - for ptype in self.ds.particle_types_raw]) + def __init__(self, ds, dataset_type): + self.real_ds = weakref.proxy(ds.real_ds) + super(YTHaloParticleIndex, self).__init__(ds, dataset_type) def _calculate_particle_index_starts(self): """ @@ -187,15 +190,6 @@ def _calculate_particle_index_starts(self): for data_file in self.data_files])) for ptype in self.ds.particle_types_raw]) -class HaloDatasetParticleIndex(HaloCatalogParticleIndex): - """ - Base class for particle index objects that read halo member particles. - """ - - def __init__(self, ds, dataset_type): - self.real_ds = weakref.proxy(ds.real_ds) - super(HaloDatasetParticleIndex, self).__init__(ds, dataset_type) - def _create_halo_id_table(self): pass @@ -203,10 +197,11 @@ def _detect_output_fields(self): field_list = [] scalar_field_list = [] units = {} - found_fields = \ - dict([(ptype, False) - for ptype, pnum in self.particle_count.items() - if pnum > 0]) + pc = dict([(ptype, sum([d.total_particles[ptype] + for d in self.data_files])) + for ptype in self.ds.particle_types_raw]) + found_fields = dict([(ptype, False) for ptype, pnum in pc.items() + if pnum > 0]) has_ids = False for data_file in self.data_files: @@ -277,6 +272,9 @@ def _get_halo_values(self, ptype, identifiers, fields, def _identify_base_chunk(self, dobj): pass + def _read_halo_particle_field(self, fh, ptype, field, indices): + return fh[field][indices] + def _read_particle_fields(self, fields, dobj, chunk = None): if len(fields) == 0: return {}, [] fields_to_read, fields_to_generate = self._split_fields(fields) @@ -286,7 +284,8 @@ def _read_particle_fields(self, fields, dobj, chunk = None): dobj, fields_to_read) return fields_to_return, fields_to_generate - def _setup_geometry(self): + def _setup_data_io(self): + super(YTHaloParticleIndex, self)._setup_data_io() if self.real_ds._instantiated_index is None: self.real_ds.index @@ -295,14 +294,13 @@ def _setup_geometry(self): setattr(self, attr, getattr(self.real_ds.index, attr)) self._calculate_particle_index_starts() - self._calculate_particle_count() self._create_halo_id_table() -class HaloCatalogHaloParticleIndex(HaloDatasetParticleIndex): - def _read_halo_particle_field(self, fh, ptype, field, indices): - return fh[field][indices] - class HaloDataset(ParticleDataset): + """ + Base class for dataset accessing particles from halo catalogs. 
+ """ + def __init__(self, ds, dataset_type): self.real_ds = ds for attr in ['filename_template', 'file_count', @@ -342,84 +340,21 @@ def __repr__(self): def _setup_classes(self): self.objects = [] -class HaloCatalogHaloDataset(HaloDataset): - _index_class = HaloCatalogHaloParticleIndex +class YTHaloDataset(HaloDataset): + """ + Dataset used for accessing member particles from YTHaloCatalogDatasets. + """ + + _index_class = YTHaloParticleIndex _file_class = YTHaloCatalogFile _field_info_class = YTHaloCatalogHaloFieldInfo def __init__(self, ds, dataset_type="ythalocatalog_halo"): - super(HaloCatalogHaloDataset, self).__init__(ds, dataset_type) + super(YTHaloDataset, self).__init__(ds, dataset_type) class HaloContainer(YTSelectionContainer): """ - Create a data container to get member particles and individual - values from halos and subhalos. Halo mass, position, and - velocity are set as attributes. Halo IDs are accessible - through the field, "member_ids". Other fields that are one - value per halo are accessible as normal. The field list for - halo objects can be seen in `ds.halos_field_list`. - - Parameters - ---------- - ptype : string - The type of halo. Possible options can be found by - inspecting the value of ds.particle_types_raw. - particle_identifier : int or tuple of ints - The halo or subhalo id. If requesting a subhalo, the id - can also be given as a tuple of the main halo id and - subgroup id, such as (1, 4) for subgroup 4 of halo 1. - - Attributes - ---------- - particle_identifier : int - The id of the halo or subhalo. - group_identifier : int - For subhalos, the id of the enclosing halo. - subgroup_identifier : int - For subhalos, the relative id of the subhalo within - the enclosing halo. - particle_number : int - Number of particles in the halo. - mass : float - Halo mass. - position : array of floats - Halo position. - velocity : array of floats - Halo velocity. - - Note - ---- - Relevant Fields: - - * particle_number - number of particles - * subhalo_number - number of subhalos - * group_identifier - id of parent group for subhalos - - Examples - -------- - - >>> import yt - >>> ds = yt.load("gadget_halos/data/groups_298/fof_subhalo_tab_298.0.hdf5") - >>> - >>> halo = ds.halo("Group", 0) - >>> print(halo.mass) - 13256.5517578 code_mass - >>> print(halo.position) - [ 16.18603706 6.95965052 12.52694607] code_length - >>> print(halo.velocity) - [ 6943694.22793569 -762788.90647454 -794749.63819757] cm/s - >>> print(halo["Group_R_Crit200"]) - [ 0.79668683] code_length - >>> - >>> # particle ids for this halo - >>> print(halo["member_ids"]) - [ 723631. 690744. 854212. ..., 608589. 905551. 1147449.] dimensionless - >>> - >>> # get the first subhalo of this halo - >>> subhalo = ds.halo("Subhalo", (0, 0)) - >>> print(subhalo["member_ids"]) - [ 723631. 690744. 854212. ..., 808362. 956359. 1248821.] dimensionless - + Base class for data containers providing halo particles. """ _type_name = "halo" @@ -464,6 +399,13 @@ def mass(self): self._mass = self[self.ptype, "particle_mass"][0] return self._mass + _radius = None + @property + def radius(self): + if self._radius is None: + self._radius = self[self.ptype, "virial_radius"][0] + return self._radius + _position = None @property def position(self): @@ -491,6 +433,48 @@ def __repr__(self): (self.ds, self.ptype, self.particle_identifier) class YTHaloCatalogHaloContainer(HaloContainer): + """ + Data container for accessing particles from a halo. 
+ + Create a data container to get member particles and individual + values from halos and subhalos. Halo mass, radius, position, and + velocity are set as attributes. Halo IDs are accessible + through the field, "member_ids". Other fields that are one + value per halo are accessible as normal. The field list for + halo objects can be seen in `ds.halos_field_list`. + + Parameters + ---------- + ptype : string + The type of halo. Possible options can be found by + inspecting the value of ds.particle_types_raw. + particle_identifier : int + The halo id. + + Examples + -------- + + >>> import yt + >>> ds = yt.load("tiny_fof_halos/DD0046/DD0046.0.h5") + >>> + >>> halo = ds.halo("halos", 0) + >>> print(halo.particle_identifier) + 0 + >>> print(halo.mass) + 8724990744704.453 Msun + >>> print (halo.radius) + 658.8140635766607 kpc + >>> print(halo.position) + [0.05496909 0.19451951 0.04056824] code_length + >>> print(halo.velocity) + [7034181.07118151 5323471.09102874 3234522.50495914] cm/s + >>> # particle ids for this halo + >>> print(halo["member_ids"]) + [ 1248. 129. 128. 31999. 31969. 31933. 31934. 159. 31903. 31841. ... + 2241. 2240. 2239. 2177. 2209. 2207. 2208.] dimensionless + + """ + def _get_member_fieldnames(self): return ['particle_number', 'particle_index_start'] From 515c2fb511cf090a58f918fbe3655b5f13842688 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 15:47:54 +0100 Subject: [PATCH 247/653] Remove imports. --- yt/frontends/halo_catalog/io.py | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 2d7289c5d17..684711f4e37 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -2,13 +2,13 @@ defaultdict import numpy as np -from yt.funcs import mylog, parse_h5_attr -from yt.units.yt_array import uvstack -from yt.utilities.exceptions import YTDomainOverflow -from yt.utilities.io_handler import BaseIOHandler -from yt.utilities.lib.geometry_utils import compute_morton +from yt.funcs import \ + parse_h5_attr +from yt.units.yt_array import \ + uvstack from yt.utilities.on_demand_imports import _h5py as h5py - +from yt.utilities.io_handler import \ + BaseIOHandler class IOHandlerYTHaloCatalog(BaseIOHandler): _dataset_type = "ythalocatalog" From 7c25f474de679017fa56fdf46aa6e4dc267e5324 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 16:01:01 +0100 Subject: [PATCH 248/653] Simplify comment. --- yt/frontends/halo_catalog/data_structures.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index f55e2e1e219..8825be2c57a 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -236,13 +236,7 @@ def _get_halo_scalar_index(self, ptype, identifier): def _get_halo_values(self, ptype, identifiers, fields, f=None): """ - Get field values for halos. IDs are likely to be - sequential (or at least monotonic), but not necessarily - all within the same file. - - This does not do much to minimize file i/o, but with - halos randomly distributed across files, there's not - much more we can do. + Get field values for halo data containers. """ # if a file is already open, don't open it again From b95e9ad2c622e685c90ca70cb81b390a85aae709 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 13 Jul 2020 16:09:07 +0100 Subject: [PATCH 249/653] Rename some halo classes. 
---
 yt/frontends/halo_catalog/data_structures.py | 2 +-
 yt/frontends/halo_catalog/io.py              | 4 ++--
 2 files changed, 3 insertions(+), 3 deletions(-)

diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py
index 8825be2c57a..90042c6de59 100644
--- a/yt/frontends/halo_catalog/data_structures.py
+++ b/yt/frontends/halo_catalog/data_structures.py
@@ -343,7 +343,7 @@ class YTHaloDataset(HaloDataset):
     _file_class = YTHaloCatalogFile
     _field_info_class = YTHaloCatalogHaloFieldInfo
 
-    def __init__(self, ds, dataset_type="ythalocatalog_halo"):
+    def __init__(self, ds, dataset_type="ythalo"):
         super(YTHaloDataset, self).__init__(ds, dataset_type)
 
 class HaloContainer(YTSelectionContainer):
diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py
index 684711f4e37..e506f14f150 100644
--- a/yt/frontends/halo_catalog/io.py
+++ b/yt/frontends/halo_catalog/io.py
@@ -166,8 +166,8 @@ def _read_particle_selection(self, dobj, fields):
             rv[field_f] = rv[field_f][:ind[field_f]]
         return rv
 
-class IOHandlerYTHaloCatalogHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog):
-    _dataset_type = "ythalocatalog_halo"
+class IOHandlerYTHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog):
+    _dataset_type = "ythalo"
 
     def _identify_fields(self, data_file):
         with h5py.File(data_file.filename, "r") as f:

From 023a9552f6a581f0a11f1c40af5e1411ecda00a7 Mon Sep 17 00:00:00 2001
From: Britton Smith
Date: Mon, 13 Jul 2020 17:11:56 +0100
Subject: [PATCH 250/653] Add narrative docs for accessing halo particles.

---
 doc/source/examining/loading_data.rst | 25 ++++++++++++++++++++++---
 1 file changed, 22 insertions(+), 3 deletions(-)

diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst
index fb0224a8607..9e99d1801ed 100644
--- a/doc/source/examining/loading_data.rst
+++ b/doc/source/examining/loading_data.rst
@@ -2222,8 +2222,8 @@ information. At this time, halo member particles cannot be loaded.
 
 .. _halocatalog:
 
-HaloCatalog
-^^^^^^^^^^^
+YTHaloCatalog
+^^^^^^^^^^^^^
 
 These are catalogs produced by the analysis discussed in :ref:`halo_catalog`.
 In the case where multiple files were produced, one need only provide the path
@@ -2248,11 +2248,30 @@ available here are similar to other catalogs. Any addition
 
 .. code-block:: python
 
    import yt
-   ds = yt.load("catalogs/catalog.0.h5")
+   ds = yt.load("tiny_fof_halos/DD0046/DD0046.0.h5")
    ad = ds.all_data()
 
    # The halo mass
    print(ad["halos", "particle_mass"])
 
+Halo Data Containers
+""""""""""""""""""""
+
+Halo particles can be accessed by creating halo data containers with the
+type of halo ("halos") and the halo id and then querying the "member_ids"
+field. Halo containers have mass, radius, position, and velocity
+attributes. Additional fields for which there will be one value per halo
+can be accessed in the same manner as conventional data containers.
+
+.. code-block:: python
+
+   halo = ds.halo("halos", 0)
+   # particles for this halo
+   print(halo["member_ids"])
+   # halo properties
+   print(halo.mass, halo.radius, halo.position, halo.velocity)
+   # any other fields, one value per halo
+   print(halo["particle_mass"])
+
 ..
_loading-openpmd-data: openPMD Data From 6d0d214673c0cc40e8f3a641cc165c5549ad94b6 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 15:55:59 +0200 Subject: [PATCH 251/653] Make abstract what is --- yt/frontends/ramses/field_handlers.py | 11 ++++++----- yt/frontends/ramses/particle_handlers.py | 11 ++++++----- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 21bb3fdebf2..2f024ad9a56 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -1,3 +1,4 @@ +import abc import glob import os @@ -22,14 +23,14 @@ def register_field_handler(ph): DETECTED_FIELDS = {} -class RAMSESFieldFileHandlerRegistry(type): +class RAMSESFieldFileHandlerRegistry(abc.ABCMeta): """ This is a base class that on instantiation registers the file handler into the list. Used as a metaclass. """ def __new__(meta, name, bases, class_dict): - cls = type.__new__(meta, name, bases, class_dict) + cls = abc.ABCMeta.__new__(meta, name, bases, class_dict) if cls.ftype is not None: register_field_handler(cls) @@ -136,7 +137,7 @@ def any_exist(cls, ds): return exists -class FieldFileHandler(HandlerMixin, metaclass=RAMSESFieldFileHandlerRegistry): +class FieldFileHandler(abc.ABC, HandlerMixin, metaclass=RAMSESFieldFileHandlerRegistry): """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -167,6 +168,7 @@ def __init__(self, domain): self.setup_handler(domain) @classmethod + @abc.abstractmethod def detect_fields(cls, ds): """ Called once to setup the fields of this type @@ -179,8 +181,7 @@ def detect_fields(cls, ds): * field_list: list of (ftype, fname) The list of the field present in the file """ - # this function must be implemented by subclasses - raise NotImplementedError + pass @classmethod def get_detected_fields(cls, ds): diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 3b13d2ed85b..3bedf1ef66e 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -1,3 +1,4 @@ +import abc import os from yt.config import ytcfg @@ -18,21 +19,21 @@ def register_particle_handler(ph): PARTICLE_HANDLERS.add(ph) -class RAMSESParticleFileHandlerRegistry(type): +class RAMSESParticleFileHandlerRegistry(abc.ABCMeta): """ This is a base class that on instantiation registers the file handler into the list. Used as a metaclass. """ def __new__(meta, name, bases, class_dict): - cls = type.__new__(meta, name, bases, class_dict) + cls = abc.ABCMeta.__new__(meta, name, bases, class_dict) if cls.ptype is not None: register_particle_handler(cls) cls._unique_registry = {} return cls -class ParticleFileHandler(HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry): +class ParticleFileHandler(abc.ABC, HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry): ''' Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -72,6 +73,7 @@ def __init__(self, domain): known_fields.append((field, field_type)) self.known_fields = known_fields + @abc.abstractmethod def read_header(self): """ This function is called once per file. It should: @@ -89,8 +91,7 @@ def read_header(self): A dictionary that maps `(type, field_name)` to their type (character), following Python's struct convention. 
""" - # this function must be implemented by subclasses - raise NotImplementedError + pass class DefaultParticleFileHandler(ParticleFileHandler): From d09ca5bed1b0b48a3efdd7fab69757eadecae0ab Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 15:59:37 +0200 Subject: [PATCH 252/653] flaking --- yt/frontends/ramses/particle_handlers.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 85faaeb2477..258d1b5a507 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -7,7 +7,6 @@ from .field_handlers import HandlerMixin from .io import _read_part_file_descriptor -from .field_handlers import HandlerMixin PARTICLE_HANDLERS = set() @@ -34,7 +33,10 @@ def __new__(meta, name, bases, class_dict): cls._unique_registry = {} return cls -class ParticleFileHandler(abc.ABC, HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry): + +class ParticleFileHandler( + abc.ABC, HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry +): """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -44,7 +46,7 @@ class ParticleFileHandler(abc.ABC, HandlerMixin, metaclass=RAMSESParticleFileHan See `SinkParticleFileHandler` for an example implementation.""" - _file_type = 'particle' + _file_type = "particle" _file_type = "particle" @@ -145,9 +147,7 @@ def read_header(self): extra_particle_fields = self.ds._extra_particle_fields if self.has_descriptor: - particle_fields = ( - _read_part_file_descriptor(self.file_descriptor) - ) + particle_fields = _read_part_file_descriptor(self.file_descriptor) else: particle_fields = list(self.known_fields) @@ -261,9 +261,7 @@ def read_header(self): # Read the fields + add the sink properties if self.has_descriptor: - fields = ( - _read_part_file_descriptor(self.file_descriptor) - ) + fields = _read_part_file_descriptor(self.file_descriptor) else: fields = list(self.known_fields) From 4b121eed912efa014caec72b88368cc648ea1b67 Mon Sep 17 00:00:00 2001 From: yt-fido Date: Wed, 5 Aug 2020 14:04:39 +0000 Subject: [PATCH 253/653] [format-command] fixes --- doc/source/cookbook/amrkdtree_downsampling.py | 9 +- yt/frontends/adaptahop/data_structures.py | 40 +-- yt/frontends/gadget_fof/api.py | 2 +- yt/frontends/gadget_fof/data_structures.py | 198 +++++++------- yt/frontends/gadget_fof/io.py | 43 +-- yt/frontends/halo_catalog/api.py | 11 +- yt/frontends/halo_catalog/data_structures.py | 248 +++++++++++------- yt/frontends/halo_catalog/fields.py | 13 +- yt/frontends/halo_catalog/io.py | 68 ++--- .../halo_catalog/tests/test_outputs.py | 66 ++--- yt/geometry/particle_geometry_handler.py | 3 +- 11 files changed, 391 insertions(+), 310 deletions(-) diff --git a/doc/source/cookbook/amrkdtree_downsampling.py b/doc/source/cookbook/amrkdtree_downsampling.py index 8abe9decdcc..6e7970f545b 100644 --- a/doc/source/cookbook/amrkdtree_downsampling.py +++ b/doc/source/cookbook/amrkdtree_downsampling.py @@ -71,8 +71,13 @@ alpha=10.0 * np.ones(4, dtype="float64"), colormap="RdBu_r", ) -tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], - alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r') +tf.add_layers( + 4, + 0.01, + col_bounds=[-27.5, -25.5], + alpha=10.0 * np.ones(4, dtype="float64"), + colormap="RdBu_r", +) sc.save("v4.png", sigma_clip=6.0) # ## This looks pretty good, now lets go back to the full resolution AMRKDTree diff --git 
a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 00c0c39028a..5c3406ff993 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -11,38 +11,42 @@ import re import stat -from yt.data_objects.data_containers import \ - YTSelectionContainer -from yt.data_objects.static_output import \ - Dataset -from yt.frontends.halo_catalog.data_structures import \ - HaloCatalogFile -from yt.funcs import \ - setdefaultattr -from yt.geometry.particle_geometry_handler import \ - ParticleIndex -from yt.utilities.cython_fortran_utils import FortranFile +from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.static_output import Dataset +from yt.frontends.halo_catalog.data_structures import HaloCatalogFile +from yt.funcs import setdefaultattr +from yt.geometry.particle_geometry_handler import ParticleIndex from yt.units import Mpc from yt.utilities.cython_fortran_utils import FortranFile from .definitions import HEADER_ATTRIBUTES from .fields import AdaptaHOPFieldInfo + class AdaptaHOPParticleIndex(ParticleIndex): def _setup_filenames(self): template = self.dataset.filename_template ndoms = self.dataset.file_count cls = self.dataset._file_class if ndoms > 1: - self.data_files = \ - [cls(self.dataset, self.io, template % {'num':i}, i, range=None) - for i in range(ndoms)] + self.data_files = [ + cls(self.dataset, self.io, template % {"num": i}, i, range=None) + for i in range(ndoms) + ] else: - self.data_files = \ - [cls(self.dataset, self.io, - self.dataset.parameter_filename, 0, range=None)] + self.data_files = [ + cls( + self.dataset, + self.io, + self.dataset.parameter_filename, + 0, + range=None, + ) + ] self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files) + sum(d.total_particles.values()) for d in self.data_files + ) + class AdaptaHOPDataset(Dataset): _index_class = AdaptaHOPParticleIndex diff --git a/yt/frontends/gadget_fof/api.py b/yt/frontends/gadget_fof/api.py index 7a59e55149c..8901ed784ee 100644 --- a/yt/frontends/gadget_fof/api.py +++ b/yt/frontends/gadget_fof/api.py @@ -1,11 +1,11 @@ from . 
import tests from .data_structures import ( GadgetFOFDataset, + GadgetFOFHaloContainer, GadgetFOFHaloDataset, GadgetFOFHaloParticleIndex, GadgetFOFHDF5File, GadgetFOFParticleIndex, - GadgetFOFHaloContainer, ) from .fields import GadgetFOFFieldInfo, GadgetFOFHaloFieldInfo from .io import IOHandlerGadgetFOFHaloHDF5, IOHandlerGadgetFOFHDF5 diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 661dad579da..bfbd6f70fe7 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -1,39 +1,32 @@ +import os +import weakref from collections import defaultdict from functools import partial import numpy as np -import os -import weakref -from yt.data_objects.data_containers import \ - YTSelectionContainer -from yt.data_objects.static_output import \ - ParticleDataset -from yt.frontends.gadget.data_structures import \ - _fix_unit_ordering -from yt.frontends.gadget_fof.fields import \ - GadgetFOFFieldInfo, \ - GadgetFOFHaloFieldInfo -from yt.frontends.halo_catalog.data_structures import \ - HaloCatalogFile -from yt.funcs import \ - only_on_root, \ - setdefaultattr -from yt.geometry.particle_geometry_handler import \ - ParticleIndex -from yt.utilities.cosmology import \ - Cosmology -from yt.utilities.logger import ytLogger as \ - mylog +from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.static_output import ParticleDataset +from yt.frontends.gadget.data_structures import _fix_unit_ordering +from yt.frontends.gadget_fof.fields import GadgetFOFFieldInfo, GadgetFOFHaloFieldInfo +from yt.frontends.halo_catalog.data_structures import HaloCatalogFile +from yt.funcs import only_on_root, setdefaultattr +from yt.geometry.particle_geometry_handler import ParticleIndex +from yt.utilities.cosmology import Cosmology +from yt.utilities.logger import ytLogger as mylog + class GadgetFOFParticleIndex(ParticleIndex): def _calculate_particle_count(self): """ Calculate the total number of each type of particle. """ - self.particle_count = \ - dict([(ptype, sum([d.total_particles[ptype] for d in self.data_files])) - for ptype in self.ds.particle_types_raw]) + self.particle_count = dict( + [ + (ptype, sum([d.total_particles[ptype] for d in self.data_files])) + for ptype in self.ds.particle_types_raw + ] + ) def _calculate_particle_index_starts(self): # Halo indices are not saved in the file, so we must count by hand. 
@@ -41,17 +34,25 @@ def _calculate_particle_index_starts(self): particle_count = defaultdict(int) offset_count = 0 for data_file in self.data_files: - data_file.index_start = dict([(ptype, particle_count[ptype]) for - ptype in data_file.total_particles]) + data_file.index_start = dict( + [(ptype, particle_count[ptype]) for ptype in data_file.total_particles] + ) data_file.offset_start = offset_count for ptype in data_file.total_particles: particle_count[ptype] += data_file.total_particles[ptype] offset_count += data_file.total_offset - self._halo_index_start = \ - dict([(ptype, np.array([data_file.index_start[ptype] - for data_file in self.data_files])) - for ptype in self.ds.particle_types_raw]) + self._halo_index_start = dict( + [ + ( + ptype, + np.array( + [data_file.index_start[ptype] for data_file in self.data_files] + ), + ) + for ptype in self.ds.particle_types_raw + ] + ) def _calculate_file_offset_map(self): # After the FOF is performed, a load-balancing step redistributes halos @@ -321,6 +322,7 @@ def _is_valid(self, *args, **kwargs): pass return valid + class GadgetFOFHaloParticleIndex(GadgetFOFParticleIndex): def __init__(self, ds, dataset_type): self.real_ds = weakref.proxy(ds.real_ds) @@ -349,23 +351,21 @@ def _detect_output_fields(self): field_list = [] scalar_field_list = [] units = {} - found_fields = \ - dict([(ptype, False) - for ptype, pnum in self.particle_count.items() - if pnum > 0]) + found_fields = dict( + [(ptype, False) for ptype, pnum in self.particle_count.items() if pnum > 0] + ) has_ids = False for data_file in self.data_files: fl, sl, idl, _units = self.io._identify_fields(data_file) units.update(_units) - field_list.extend([f for f in fl - if f not in field_list]) - scalar_field_list.extend([f for f in sl - if f not in scalar_field_list]) + field_list.extend([f for f in fl if f not in field_list]) + scalar_field_list.extend([f for f in sl if f not in scalar_field_list]) for ptype in found_fields: found_fields[ptype] |= data_file.total_particles[ptype] has_ids |= len(idl) > 0 - if all(found_fields.values()) and has_ids: break + if all(found_fields.values()) and has_ids: + break self.field_list = field_list self.scalar_field_list = scalar_field_list @@ -378,26 +378,24 @@ def _detect_output_fields(self): def _identify_base_chunk(self, dobj): pass - def _read_particle_fields(self, fields, dobj, chunk = None): - if len(fields) == 0: return {}, [] + def _read_particle_fields(self, fields, dobj, chunk=None): + if len(fields) == 0: + return {}, [] fields_to_read, fields_to_generate = self._split_fields(fields) if len(fields_to_read) == 0: return {}, fields_to_generate - fields_to_return = self.io._read_particle_selection( - dobj, fields_to_read) + fields_to_return = self.io._read_particle_selection(dobj, fields_to_read) return fields_to_return, fields_to_generate def _get_halo_file_indices(self, ptype, identifiers): - return np.digitize(identifiers, - self._halo_index_start[ptype], right=False) - 1 + return np.digitize(identifiers, self._halo_index_start[ptype], right=False) - 1 def _get_halo_scalar_index(self, ptype, identifier): i_scalar = self._get_halo_file_indices(ptype, [identifier])[0] scalar_index = identifier - self._halo_index_start[ptype][i_scalar] return scalar_index - def _get_halo_values(self, ptype, identifiers, fields, - f=None): + def _get_halo_values(self, ptype, identifiers, fields, f=None): """ Get field values for halos. 
IDs are likely to be sequential (or at least monotonic), but not necessarily @@ -409,28 +407,32 @@ def _get_halo_values(self, ptype, identifiers, fields, """ # if a file is already open, don't open it again - filename = None if f is None \ - else f.filename + filename = None if f is None else f.filename data = defaultdict(lambda: np.empty(identifiers.size)) i_scalars = self._get_halo_file_indices(ptype, identifiers) for i_scalar in np.unique(i_scalars): target = i_scalars == i_scalar - scalar_indices = identifiers - \ - self._halo_index_start[ptype][i_scalar] + scalar_indices = identifiers - self._halo_index_start[ptype][i_scalar] # only open file if it's not already open - my_f = f if self.data_files[i_scalar].filename == filename \ - else h5py.File(self.data_files[i_scalar].filename, mode="r") + my_f = ( + f + if self.data_files[i_scalar].filename == filename + else h5py.File(self.data_files[i_scalar].filename, mode="r") + ) for field in fields: - data[field][target] = \ - my_f[os.path.join(ptype, field)][()][scalar_indices[target]] + data[field][target] = my_f[os.path.join(ptype, field)][()][ + scalar_indices[target] + ] - if self.data_files[i_scalar].filename != filename: my_f.close() + if self.data_files[i_scalar].filename != filename: + my_f.close() return data + class GadgetFOFHaloDataset(ParticleDataset): _index_class = GadgetFOFHaloParticleIndex _file_class = GadgetFOFHDF5File @@ -438,13 +440,18 @@ class GadgetFOFHaloDataset(ParticleDataset): def __init__(self, ds, dataset_type="gadget_fof_halo_hdf5"): self.real_ds = ds - for attr in ['filename_template', 'file_count', - 'particle_types_raw', 'particle_types', - 'periodicity']: + for attr in [ + "filename_template", + "file_count", + "particle_types_raw", + "particle_types", + "periodicity", + ]: setattr(self, attr, getattr(self.real_ds, attr)) super(GadgetFOFHaloDataset, self).__init__( - self.real_ds.parameter_filename, dataset_type) + self.real_ds.parameter_filename, dataset_type + ) def print_key_parameters(self): pass @@ -453,18 +460,25 @@ def _set_derived_attrs(self): pass def _parse_parameter_file(self): - for attr in ["cosmological_simulation", "cosmology", - "current_redshift", "current_time", - "dimensionality", "domain_dimensions", - "domain_left_edge", "domain_right_edge", - "domain_width", "hubble_constant", - "omega_lambda", "omega_matter", - "unique_identifier"]: + for attr in [ + "cosmological_simulation", + "cosmology", + "current_redshift", + "current_time", + "dimensionality", + "domain_dimensions", + "domain_left_edge", + "domain_right_edge", + "domain_width", + "hubble_constant", + "omega_lambda", + "omega_matter", + "unique_identifier", + ]: setattr(self, attr, getattr(self.real_ds, attr)) def set_code_units(self): - for unit in ["length", "time", "mass", - "velocity", "magnetic", "temperature"]: + for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: my_unit = "%s_unit" % unit setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) self.unit_registry = self.real_ds.unit_registry @@ -475,6 +489,7 @@ def __repr__(self): def _setup_classes(self): self.objects = [] + class GadgetFOFHaloContainer(YTSelectionContainer): """ Create a data container to get member particles and individual @@ -555,43 +570,49 @@ class GadgetFOFHaloContainer(YTSelectionContainer): def __init__(self, ptype, particle_identifier, ds=None): if ptype not in ds.particle_types_raw: - raise RuntimeError("Possible halo types are %s, supplied \"%s\"." 
% - (ds.particle_types_raw, ptype)) + raise RuntimeError( + 'Possible halo types are %s, supplied "%s".' + % (ds.particle_types_raw, ptype) + ) self.ptype = ptype self._current_particle_type = ptype super(GadgetFOFHaloContainer, self).__init__(ds, {}) if ptype == "Subhalo" and isinstance(particle_identifier, tuple): - self.group_identifier, self.subgroup_identifier = \ - particle_identifier + self.group_identifier, self.subgroup_identifier = particle_identifier my_data = self.index._get_halo_values( - "Group", np.array([self.group_identifier]), - ["GroupFirstSub"]) - self.particle_identifier = \ - np.int64(my_data["GroupFirstSub"][0] + self.subgroup_identifier) + "Group", np.array([self.group_identifier]), ["GroupFirstSub"] + ) + self.particle_identifier = np.int64( + my_data["GroupFirstSub"][0] + self.subgroup_identifier + ) else: self.particle_identifier = particle_identifier if self.particle_identifier >= self.index.particle_count[ptype]: - raise RuntimeError("%s %d requested, but only %d %s objects exist." % - (ptype, particle_identifier, - self.index.particle_count[ptype], ptype)) + raise RuntimeError( + "%s %d requested, but only %d %s objects exist." + % (ptype, particle_identifier, self.index.particle_count[ptype], ptype) + ) # Find the file that has the scalar values for this halo. - i_scalar = self.index._get_halo_file_indices( - ptype, [self.particle_identifier])[0] + i_scalar = self.index._get_halo_file_indices(ptype, [self.particle_identifier])[ + 0 + ] self.scalar_data_file = self.index.data_files[i_scalar] # index within halo arrays that corresponds to this halo self.scalar_index = self.index._get_halo_scalar_index( - ptype, self.particle_identifier) + ptype, self.particle_identifier + ) halo_fields = ["%sLen" % ptype] - if ptype == "Subhalo": halo_fields.append("SubhaloGrNr") + if ptype == "Subhalo": + halo_fields.append("SubhaloGrNr") my_data = self.index._get_halo_values( - ptype, np.array([self.particle_identifier]), - halo_fields) + ptype, np.array([self.particle_identifier]), halo_fields + ) self.particle_number = np.int64(my_data["%sLen" % ptype][0]) if ptype == "Group": @@ -684,5 +705,4 @@ def __init__(self, ptype, particle_identifier, ds=None): setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) def __repr__(self): - return "%s_%s_%09d" % \ - (self.ds, self.ptype, self.particle_identifier) + return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier) diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index 76723642c40..ee621e6cb0e 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -188,6 +188,7 @@ def _identify_fields(self, data_file): self.offset_fields = self.offset_fields.union(set(my_offset_fields)) return fields, {} + class IOHandlerGadgetFOFHaloHDF5(IOHandlerGadgetFOFHDF5): _dataset_type = "gadget_fof_halo_hdf5" @@ -198,9 +199,9 @@ def _read_particle_selection(self, dobj, fields): rv = {} ind = {} # We first need a set of masks for each particle type - ptf = defaultdict(list) # ON-DISK TO READ - fsize = defaultdict(lambda: 0) # COUNT RV - field_maps = defaultdict(list) # ptypes -> fields + ptf = defaultdict(list) # ON-DISK TO READ + fsize = defaultdict(lambda: 0) # COUNT RV + field_maps = defaultdict(list) # ptypes -> fields unions = self.ds.particle_unions # What we need is a mapping from particle types to return types for field in fields: @@ -227,11 +228,11 @@ def _read_particle_selection(self, dobj, fields): if field[1] in self._vector_fields: shape = (fsize[field], 
self._vector_fields[field[1]]) elif field[1] in self._array_fields: - shape = (fsize[field],)+self._array_fields[field[1]] + shape = (fsize[field],) + self._array_fields[field[1]] elif field in self.ds.scalar_field_list: shape = (1,) else: - shape = (fsize[field], ) + shape = (fsize[field],) rv[field] = np.empty(shape, dtype="float64") ind[field] = 0 # Now we read. @@ -239,33 +240,35 @@ def _read_particle_selection(self, dobj, fields): # Note that we now need to check the mappings for field_f in field_maps[field_r]: my_ind = ind[field_f] - rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals + rv[field_f][my_ind : my_ind + vals.shape[0], ...] = vals ind[field_f] += vals.shape[0] # Now we need to truncate all our fields, since we allow for # over-estimating. for field_f in ind: - rv[field_f] = rv[field_f][:ind[field_f]] + rv[field_f] = rv[field_f][: ind[field_f]] return rv def _read_scalar_fields(self, dobj, scalar_fields): all_data = {} - if not scalar_fields: return all_data + if not scalar_fields: + return all_data pcount = 1 with h5py.File(dobj.scalar_data_file.filename, mode="r") as f: for ptype, field_list in sorted(scalar_fields.items()): for field in field_list: if field == "particle_identifier": - field_data = \ - np.arange(dobj.scalar_data_file.total_particles[ptype]) + \ - dobj.scalar_data_file.index_start[ptype] + field_data = ( + np.arange(dobj.scalar_data_file.total_particles[ptype]) + + dobj.scalar_data_file.index_start[ptype] + ) elif field in f[ptype]: field_data = f[ptype][field][()].astype("float64") else: - fname = field[:field.rfind("_")] + fname = field[: field.rfind("_")] field_data = f[ptype][fname][()].astype("float64") my_div = field_data.size / pcount if my_div > 1: - findex = int(field[field.rfind("_") + 1:]) + findex = int(field[field.rfind("_") + 1 :]) field_data = field_data[:, findex] data = np.array([field_data[dobj.scalar_index]]) all_data[(ptype, field)] = data @@ -329,12 +332,14 @@ def _identify_fields(self, data_file): for ptype in self.ds.particle_types_raw: fields.append((ptype, "particle_identifier")) scalar_fields.append((ptype, "particle_identifier")) - my_fields, my_offset_fields = \ - subfind_field_list(f[ptype], ptype, data_file.total_particles) + my_fields, my_offset_fields = subfind_field_list( + f[ptype], ptype, data_file.total_particles + ) fields.extend(my_fields) scalar_fields.extend(my_fields) - if "IDs" not in f: continue + if "IDs" not in f: + continue id_fields = [(ptype, field) for field in f["IDs"]] fields.extend(id_fields) return fields, scalar_fields, id_fields, {} @@ -373,7 +378,9 @@ def subfind_field_list(fh, ptype, pcount): fields.append(("Group", fname)) offset_fields.append(fname) else: - mylog.warning("Cannot add field (%s, %s) with size %d." % \ - (ptype, fh[field].name, fh[field].size)) + mylog.warning( + "Cannot add field (%s, %s) with size %d." 
+ % (ptype, fh[field].name, fh[field].size) + ) continue return fields, offset_fields diff --git a/yt/frontends/halo_catalog/api.py b/yt/frontends/halo_catalog/api.py index 8aa2d4204ce..53b2b575289 100644 --- a/yt/frontends/halo_catalog/api.py +++ b/yt/frontends/halo_catalog/api.py @@ -1,8 +1,3 @@ -from .data_structures import \ - YTHaloCatalogDataset - -from .io import \ - IOHandlerYTHaloCatalog - -from .fields import \ - YTHaloCatalogFieldInfo +from .data_structures import YTHaloCatalogDataset +from .fields import YTHaloCatalogFieldInfo +from .io import IOHandlerYTHaloCatalog diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 90042c6de59..5cbee86d82d 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -1,27 +1,23 @@ +import glob +import weakref from collections import defaultdict from functools import partial -import glob -from yt.utilities.on_demand_imports import _h5py as h5py + import numpy as np -import weakref -from .fields import \ - YTHaloCatalogFieldInfo, \ - YTHaloCatalogHaloFieldInfo - -from yt.data_objects.data_containers import \ - YTSelectionContainer -from yt.data_objects.static_output import \ - ParticleDataset -from yt.frontends.ytdata.data_structures import \ - SavedDataset -from yt.funcs import \ - parse_h5_attr -from yt.geometry.particle_geometry_handler import \ - ParticleIndex -from yt.data_objects.static_output import \ - ParticleFile, \ - validate_index_order +from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.static_output import ( + ParticleDataset, + ParticleFile, + validate_index_order, +) +from yt.frontends.ytdata.data_structures import SavedDataset +from yt.funcs import parse_h5_attr +from yt.geometry.particle_geometry_handler import ParticleIndex +from yt.utilities.on_demand_imports import _h5py as h5py + +from .fields import YTHaloCatalogFieldInfo, YTHaloCatalogHaloFieldInfo + class HaloCatalogFile(ParticleFile): """ @@ -30,9 +26,9 @@ class HaloCatalogFile(ParticleFile): This is mainly here to correct for periodicity when reading particle positions. """ + def __init__(self, ds, io, filename, file_id, frange): - super(HaloCatalogFile, self).__init__( - ds, io, filename, file_id, frange) + super(HaloCatalogFile, self).__init__(ds, io, filename, file_id, frange) def _read_particle_positions(self, ptype, f=None): raise NotImplementedError @@ -56,19 +52,21 @@ def _get_particle_positions(self, ptype, f=None): return pos + class YTHaloCatalogFile(HaloCatalogFile): """ Data file class for the YTHaloCatalogDataset. """ + def __init__(self, ds, io, filename, file_id, frange): with h5py.File(filename, mode="r") as f: - self.header = dict((field, parse_h5_attr(f, field)) \ - for field in f.attrs.keys()) - pids = f.get('particles/ids') + self.header = dict( + (field, parse_h5_attr(f, field)) for field in f.attrs.keys() + ) + pids = f.get("particles/ids") self.total_ids = 0 if pids is None else pids.size self.group_length_sum = self.total_ids - super(YTHaloCatalogFile, self).__init__( - ds, io, filename, file_id, frange) + super(YTHaloCatalogFile, self).__init__(ds, io, filename, file_id, frange) def _read_particle_positions(self, ptype, f=None): """ @@ -91,6 +89,7 @@ def _read_particle_positions(self, ptype, f=None): return pos + class YTHaloCatalogDataset(SavedDataset): """ Dataset class for halo catalogs made with yt. 
@@ -98,21 +97,37 @@ class YTHaloCatalogDataset(SavedDataset): This covers yt FoF/HoP halo finders and the halo analysis in yt_astro_analysis. """ + _index_class = ParticleIndex _file_class = YTHaloCatalogFile _field_info_class = YTHaloCatalogFieldInfo _suffix = ".h5" - _con_attrs = ("cosmological_simulation", - "current_time", "current_redshift", - "hubble_constant", "omega_matter", "omega_lambda", - "domain_left_edge", "domain_right_edge") - - def __init__(self, filename, dataset_type="ythalocatalog", - index_order=None, units_override=None, unit_system="cgs"): + _con_attrs = ( + "cosmological_simulation", + "current_time", + "current_redshift", + "hubble_constant", + "omega_matter", + "omega_lambda", + "domain_left_edge", + "domain_right_edge", + ) + + def __init__( + self, + filename, + dataset_type="ythalocatalog", + index_order=None, + units_override=None, + unit_system="cgs", + ): self.index_order = validate_index_order(index_order) - super(YTHaloCatalogDataset, self).__init__(filename, dataset_type, - units_override=units_override, - unit_system=unit_system) + super(YTHaloCatalogDataset, self).__init__( + filename, + dataset_type, + units_override=units_override, + unit_system=unit_system, + ) def add_field(self, *args, **kwargs): super(YTHaloCatalogDataset, self).add_field(*args, **kwargs) @@ -127,6 +142,7 @@ def halos_derived_field_list(self): return self._halos_ds.derived_field_list _instantiated_halo_ds = None + @property def _halos_ds(self): if self._instantiated_halo_ds is None: @@ -162,6 +178,7 @@ def _is_valid(self, *args, **kwargs): return True return False + class YTHaloParticleIndex(ParticleIndex): """ Particle index for getting halo particles from YTHaloCatalogDatasets. @@ -178,17 +195,25 @@ def _calculate_particle_index_starts(self): particle_count = defaultdict(int) offset_count = 0 for data_file in self.data_files: - data_file.index_start = dict([(ptype, particle_count[ptype]) for - ptype in data_file.total_particles]) + data_file.index_start = dict( + [(ptype, particle_count[ptype]) for ptype in data_file.total_particles] + ) data_file.offset_start = offset_count for ptype in data_file.total_particles: particle_count[ptype] += data_file.total_particles[ptype] offset_count += getattr(data_file, "total_offset", 0) - self._halo_index_start = \ - dict([(ptype, np.array([data_file.index_start[ptype] - for data_file in self.data_files])) - for ptype in self.ds.particle_types_raw]) + self._halo_index_start = dict( + [ + ( + ptype, + np.array( + [data_file.index_start[ptype] for data_file in self.data_files] + ), + ) + for ptype in self.ds.particle_types_raw + ] + ) def _create_halo_id_table(self): pass @@ -197,24 +222,25 @@ def _detect_output_fields(self): field_list = [] scalar_field_list = [] units = {} - pc = dict([(ptype, sum([d.total_particles[ptype] - for d in self.data_files])) - for ptype in self.ds.particle_types_raw]) - found_fields = dict([(ptype, False) for ptype, pnum in pc.items() - if pnum > 0]) + pc = dict( + [ + (ptype, sum([d.total_particles[ptype] for d in self.data_files])) + for ptype in self.ds.particle_types_raw + ] + ) + found_fields = dict([(ptype, False) for ptype, pnum in pc.items() if pnum > 0]) has_ids = False for data_file in self.data_files: fl, sl, idl, _units = self.io._identify_fields(data_file) units.update(_units) - field_list.extend([f for f in fl - if f not in field_list]) - scalar_field_list.extend([f for f in sl - if f not in scalar_field_list]) + field_list.extend([f for f in fl if f not in field_list]) + scalar_field_list.extend([f 
for f in sl if f not in scalar_field_list]) for ptype in found_fields: found_fields[ptype] |= data_file.total_particles[ptype] has_ids |= len(idl) > 0 - if all(found_fields.values()) and has_ids: break + if all(found_fields.values()) and has_ids: + break self.field_list = field_list self.scalar_field_list = scalar_field_list @@ -225,41 +251,41 @@ def _detect_output_fields(self): ds.particle_types_raw = ds.particle_types def _get_halo_file_indices(self, ptype, identifiers): - return np.digitize(identifiers, - self._halo_index_start[ptype], right=False) - 1 + return np.digitize(identifiers, self._halo_index_start[ptype], right=False) - 1 def _get_halo_scalar_index(self, ptype, identifier): i_scalar = self._get_halo_file_indices(ptype, [identifier])[0] scalar_index = identifier - self._halo_index_start[ptype][i_scalar] return scalar_index - def _get_halo_values(self, ptype, identifiers, fields, - f=None): + def _get_halo_values(self, ptype, identifiers, fields, f=None): """ Get field values for halo data containers. """ # if a file is already open, don't open it again - filename = None if f is None \ - else f.filename + filename = None if f is None else f.filename data = defaultdict(lambda: np.empty(identifiers.size)) i_scalars = self._get_halo_file_indices(ptype, identifiers) for i_scalar in np.unique(i_scalars): target = i_scalars == i_scalar - scalar_indices = identifiers - \ - self._halo_index_start[ptype][i_scalar] + scalar_indices = identifiers - self._halo_index_start[ptype][i_scalar] # only open file if it's not already open - my_f = f if self.data_files[i_scalar].filename == filename \ - else h5py.File(self.data_files[i_scalar].filename, "r") + my_f = ( + f + if self.data_files[i_scalar].filename == filename + else h5py.File(self.data_files[i_scalar].filename, "r") + ) for field in fields: - data[field][target] = \ - self._read_halo_particle_field( - my_f, ptype, field, scalar_indices[target]) + data[field][target] = self._read_halo_particle_field( + my_f, ptype, field, scalar_indices[target] + ) - if self.data_files[i_scalar].filename != filename: my_f.close() + if self.data_files[i_scalar].filename != filename: + my_f.close() return data @@ -269,13 +295,13 @@ def _identify_base_chunk(self, dobj): def _read_halo_particle_field(self, fh, ptype, field, indices): return fh[field][indices] - def _read_particle_fields(self, fields, dobj, chunk = None): - if len(fields) == 0: return {}, [] + def _read_particle_fields(self, fields, dobj, chunk=None): + if len(fields) == 0: + return {}, [] fields_to_read, fields_to_generate = self._split_fields(fields) if len(fields_to_read) == 0: return {}, fields_to_generate - fields_to_return = self.io._read_particle_selection( - dobj, fields_to_read) + fields_to_return = self.io._read_particle_selection(dobj, fields_to_read) return fields_to_return, fields_to_generate def _setup_data_io(self): @@ -284,12 +310,13 @@ def _setup_data_io(self): self.real_ds.index # inherit some things from parent index - for attr in ['data_files', 'total_particles']: + for attr in ["data_files", "total_particles"]: setattr(self, attr, getattr(self.real_ds.index, attr)) self._calculate_particle_index_starts() self._create_halo_id_table() + class HaloDataset(ParticleDataset): """ Base class for dataset accessing particles from halo catalogs. 
@@ -297,13 +324,16 @@ class HaloDataset(ParticleDataset): def __init__(self, ds, dataset_type): self.real_ds = ds - for attr in ['filename_template', 'file_count', - 'particle_types_raw', 'particle_types', - 'periodicity']: + for attr in [ + "filename_template", + "file_count", + "particle_types_raw", + "particle_types", + "periodicity", + ]: setattr(self, attr, getattr(self.real_ds, attr)) - super(HaloDataset, self).__init__( - self.real_ds.parameter_filename, dataset_type) + super(HaloDataset, self).__init__(self.real_ds.parameter_filename, dataset_type) def print_key_parameters(self): pass @@ -312,18 +342,25 @@ def _set_derived_attrs(self): pass def _parse_parameter_file(self): - for attr in ["cosmological_simulation", "cosmology", - "current_redshift", "current_time", - "dimensionality", "domain_dimensions", - "domain_left_edge", "domain_right_edge", - "domain_width", "hubble_constant", - "omega_lambda", "omega_matter", - "unique_identifier"]: + for attr in [ + "cosmological_simulation", + "cosmology", + "current_redshift", + "current_time", + "dimensionality", + "domain_dimensions", + "domain_left_edge", + "domain_right_edge", + "domain_width", + "hubble_constant", + "omega_lambda", + "omega_matter", + "unique_identifier", + ]: setattr(self, attr, getattr(self.real_ds, attr)) def set_code_units(self): - for unit in ["length", "time", "mass", - "velocity", "magnetic", "temperature"]: + for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: my_unit = "%s_unit" % unit setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) self.unit_registry = self.real_ds.unit_registry @@ -334,6 +371,7 @@ def __repr__(self): def _setup_classes(self): self.objects = [] + class YTHaloDataset(HaloDataset): """ Dataset used for accessing member particles from YTHaloCatalogDatasets. @@ -346,6 +384,7 @@ class YTHaloDataset(HaloDataset): def __init__(self, ds, dataset_type="ythalo"): super(YTHaloDataset, self).__init__(ds, dataset_type) + class HaloContainer(YTSelectionContainer): """ Base class for data containers providing halo particles. @@ -358,8 +397,10 @@ class HaloContainer(YTSelectionContainer): def __init__(self, ptype, particle_identifier, ds=None): if ptype not in ds.particle_types_raw: - raise RuntimeError("Possible halo types are %s, supplied \"%s\"." % - (ds.particle_types_raw, ptype)) + raise RuntimeError( + 'Possible halo types are %s, supplied "%s".' + % (ds.particle_types_raw, ptype) + ) self.ptype = ptype self._current_particle_type = ptype @@ -368,8 +409,9 @@ def __init__(self, ptype, particle_identifier, ds=None): self._set_identifiers(particle_identifier) # Find the file that has the scalar values for this halo. 
- i_scalar = self.index._get_halo_file_indices( - ptype, [self.particle_identifier])[0] + i_scalar = self.index._get_halo_file_indices(ptype, [self.particle_identifier])[ + 0 + ] self.i_scalar = i_scalar self.scalar_data_file = self.index.data_files[i_scalar] @@ -378,7 +420,8 @@ def __init__(self, ptype, particle_identifier, ds=None): # index within halo arrays that corresponds to this halo self.scalar_index = self.index._get_halo_scalar_index( - ptype, self.particle_identifier) + ptype, self.particle_identifier + ) self._set_io_data() self.particle_number = self._get_particle_number() @@ -387,6 +430,7 @@ def __init__(self, ptype, particle_identifier, ds=None): self._set_field_indices() _mass = None + @property def mass(self): if self._mass is None: @@ -394,6 +438,7 @@ def mass(self): return self._mass _radius = None + @property def radius(self): if self._radius is None: @@ -401,6 +446,7 @@ def radius(self): return self._radius _position = None + @property def position(self): if self._position is None: @@ -408,6 +454,7 @@ def position(self): return self._position _velocity = None + @property def velocity(self): if self._velocity is None: @@ -417,14 +464,15 @@ def velocity(self): def _set_io_data(self): halo_fields = self._get_member_fieldnames() my_data = self.index._get_halo_values( - self.ptype, np.array([self.particle_identifier]), - halo_fields) - self._io_data = dict((field, np.int64(val[0])) - for field, val in my_data.items()) + self.ptype, np.array([self.particle_identifier]), halo_fields + ) + self._io_data = dict( + (field, np.int64(val[0])) for field, val in my_data.items() + ) def __repr__(self): - return "%s_%s_%09d" % \ - (self.ds, self.ptype, self.particle_identifier) + return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier) + class YTHaloCatalogHaloContainer(HaloContainer): """ @@ -470,13 +518,13 @@ class YTHaloCatalogHaloContainer(HaloContainer): """ def _get_member_fieldnames(self): - return ['particle_number', 'particle_index_start'] + return ["particle_number", "particle_index_start"] def _get_particle_number(self): - return self._io_data['particle_number'] + return self._io_data["particle_number"] def _set_field_indices(self): - self.field_data_start = [self._io_data['particle_index_start']] + self.field_data_start = [self._io_data["particle_index_start"]] self.field_data_end = [self.field_data_start[0] + self.particle_number] def _set_identifiers(self, particle_identifier): diff --git a/yt/frontends/halo_catalog/fields.py b/yt/frontends/halo_catalog/fields.py index 4bf9f1adfc7..6884141e60a 100644 --- a/yt/frontends/halo_catalog/fields.py +++ b/yt/frontends/halo_catalog/fields.py @@ -17,17 +17,14 @@ ("virial_radius", (r_units, [], "Virial Radius")), ) + class YTHaloCatalogFieldInfo(FieldInfoContainer): - known_other_fields = ( - ) + known_other_fields = () known_particle_fields = _particle_fields + class YTHaloCatalogHaloFieldInfo(FieldInfoContainer): - known_other_fields = ( - ) + known_other_fields = () - known_particle_fields = _particle_fields + \ - ( - ("ids", ("", ["member_ids"], None)), - ) + known_particle_fields = _particle_fields + (("ids", ("", ["member_ids"], None)),) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index e506f14f150..aa50c6d5045 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -1,14 +1,12 @@ -from collections import \ - defaultdict +from collections import defaultdict + import numpy as np -from yt.funcs import \ - parse_h5_attr -from yt.units.yt_array import \ 
- uvstack +from yt.funcs import parse_h5_attr +from yt.units.yt_array import uvstack +from yt.utilities.io_handler import BaseIOHandler from yt.utilities.on_demand_imports import _h5py as h5py -from yt.utilities.io_handler import \ - BaseIOHandler + class IOHandlerYTHaloCatalog(BaseIOHandler): _dataset_type = "ythalocatalog" @@ -81,14 +79,16 @@ def _count_particles(self, data_file): def _identify_fields(self, data_file): with h5py.File(data_file.filename, "r") as f: - fields = [("halos", field) for field in f - if not isinstance(f[field], h5py.Group)] - units = dict([(("halos", field), - parse_h5_attr(f[field], "units")) - for field in f]) + fields = [ + ("halos", field) for field in f if not isinstance(f[field], h5py.Group) + ] + units = dict( + [(("halos", field), parse_h5_attr(f[field], "units")) for field in f] + ) return fields, units -class HaloDatasetIOHandler(): + +class HaloDatasetIOHandler: """ Base class for io handlers to load halo member particles. """ @@ -117,9 +117,9 @@ def _read_particle_selection(self, dobj, fields): rv = {} ind = {} # We first need a set of masks for each particle type - ptf = defaultdict(list) # ON-DISK TO READ - fsize = defaultdict(lambda: 0) # COUNT RV - field_maps = defaultdict(list) # ptypes -> fields + ptf = defaultdict(list) # ON-DISK TO READ + fsize = defaultdict(lambda: 0) # COUNT RV + field_maps = defaultdict(list) # ptypes -> fields unions = self.ds.particle_unions # What we need is a mapping from particle types to return types for field in fields: @@ -146,11 +146,11 @@ def _read_particle_selection(self, dobj, fields): if field[1] in self._vector_fields: shape = (fsize[field], self._vector_fields[field[1]]) elif field[1] in self._array_fields: - shape = (fsize[field],)+self._array_fields[field[1]] + shape = (fsize[field],) + self._array_fields[field[1]] elif field in self.ds.scalar_field_list: shape = (1,) else: - shape = (fsize[field], ) + shape = (fsize[field],) rv[field] = np.empty(shape, dtype="float64") ind[field] = 0 # Now we read. @@ -158,34 +158,35 @@ def _read_particle_selection(self, dobj, fields): # Note that we now need to check the mappings for field_f in field_maps[field_r]: my_ind = ind[field_f] - rv[field_f][my_ind:my_ind + vals.shape[0],...] = vals + rv[field_f][my_ind : my_ind + vals.shape[0], ...] = vals ind[field_f] += vals.shape[0] # Now we need to truncate all our fields, since we allow for # over-estimating. 
for field_f in ind: - rv[field_f] = rv[field_f][:ind[field_f]] + rv[field_f] = rv[field_f][: ind[field_f]] return rv + class IOHandlerYTHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog): _dataset_type = "ythalo" def _identify_fields(self, data_file): with h5py.File(data_file.filename, "r") as f: - scalar_fields = [("halos", field) for field in f - if not isinstance(f[field], h5py.Group)] - units = dict([(("halos", field), - parse_h5_attr(f[field], "units")) - for field in f]) - if 'particles' in f: - id_fields = [('halos', field) for field in f['particles']] + scalar_fields = [ + ("halos", field) for field in f if not isinstance(f[field], h5py.Group) + ] + units = dict( + [(("halos", field), parse_h5_attr(f[field], "units")) for field in f] + ) + if "particles" in f: + id_fields = [("halos", field) for field in f["particles"]] else: id_fields = [] - return scalar_fields+id_fields, scalar_fields, id_fields, units + return scalar_fields + id_fields, scalar_fields, id_fields, units def _read_member_fields(self, dobj, member_fields): - all_data = defaultdict(lambda: np.empty(dobj.particle_number, - dtype=np.float64)) + all_data = defaultdict(lambda: np.empty(dobj.particle_number, dtype=np.float64)) if not member_fields: return all_data field_start = 0 @@ -200,8 +201,9 @@ def _read_member_fields(self, dobj, member_fields): for ptype, field_list in sorted(member_fields.items()): for field in field_list: field_data = all_data[(ptype, field)] - my_data = \ - f['particles'][field][start_index:end_index].astype("float64") + my_data = f["particles"][field][start_index:end_index].astype( + "float64" + ) field_data[field_start:field_end] = my_data field_start = field_end return all_data diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index c0d314bf7fb..7b1214d99e1 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -1,23 +1,19 @@ import numpy as np -from yt.convenience import \ - load as yt_load -from yt.frontends.halo_catalog.data_structures import \ - YTHaloCatalogDataset -from yt.frontends.ytdata.utilities import \ - save_as_dataset -from yt.testing import \ - assert_allclose_units, \ - assert_array_equal, \ - assert_equal, \ - requires_file, \ - requires_module, \ - TempDirTest -from yt.units.yt_array import \ - YTArray, \ - YTQuantity -from yt.utilities.answer_testing.framework import \ - data_dir_load +from yt.convenience import load as yt_load +from yt.frontends.halo_catalog.data_structures import YTHaloCatalogDataset +from yt.frontends.ytdata.utilities import save_as_dataset +from yt.testing import ( + TempDirTest, + assert_allclose_units, + assert_array_equal, + assert_equal, + requires_file, + requires_module, +) +from yt.units.yt_array import YTArray, YTQuantity +from yt.utilities.answer_testing.framework import data_dir_load + def fake_halo_catalog(data): filename = "catalog.0.h5" @@ -99,31 +95,37 @@ def test_halo_catalog_boundary_particles(self): f2.sort() assert_array_equal(f1, f2) + t46 = "tiny_fof_halos/DD0046/DD0046.0.h5" + + @requires_file(t46) -@requires_module('h5py') +@requires_module("h5py") def test_halo_quantities(): ds = data_dir_load(t46) ad = ds.all_data() for i in range(ds.index.total_particles): - hid = int(ad['halos', 'particle_identifier'][i]) - halo = ds.halo('halos', hid) - for field in ['mass', 'position', 'velocity']: - v1 = ad['halos', 'particle_%s' % field][i] + hid = int(ad["halos", "particle_identifier"][i]) + halo = ds.halo("halos", 
hid) + for field in ["mass", "position", "velocity"]: + v1 = ad["halos", "particle_%s" % field][i] v2 = getattr(halo, field) assert_allclose_units( - v1, v2, rtol=1e-15, - err_msg='Halo %d %s field mismatch.' % (hid, field)) + v1, v2, rtol=1e-15, err_msg="Halo %d %s field mismatch." % (hid, field) + ) + t46 = "tiny_fof_halos/DD0046/DD0046.0.h5" + + @requires_file(t46) -@requires_module('h5py') +@requires_module("h5py") def test_halo_particles(): ds = data_dir_load(t46) - i = ds.r['halos', 'particle_mass'].argmax() - hid = int(ds.r['halos', 'particle_identifier'][i]) - halo = ds.halo('halos', hid) - ids = halo['halos', 'member_ids'] + i = ds.r["halos", "particle_mass"].argmax() + hid = int(ds.r["halos", "particle_identifier"][i]) + halo = ds.halo("halos", hid) + ids = halo["halos", "member_ids"] assert_equal(ids.size, 420) - assert_equal(ids.min(), 19478.) - assert_equal(ids.max(), 31669.) + assert_equal(ids.min(), 19478.0) + assert_equal(ids.max(), 31669.0) diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 0ca1ef30e13..ddae7423a4e 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -48,7 +48,8 @@ def convert(self, unit): def _setup_filenames(self): if hasattr(self, "data_files"): raise RuntimeError( - '_setup_filenames has already been called. Something is wrong.') + "_setup_filenames has already been called. Something is wrong." + ) template = self.dataset.filename_template ndoms = self.dataset.file_count From f35d324bf33f25bd1f87940dd322cfb9fdc42354 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 5 Aug 2020 15:21:25 +0100 Subject: [PATCH 254/653] Add missing methods. --- yt/frontends/halo_catalog/data_structures.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 5cbee86d82d..a9857220682 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -384,6 +384,12 @@ class YTHaloDataset(HaloDataset): def __init__(self, ds, dataset_type="ythalo"): super(YTHaloDataset, self).__init__(ds, dataset_type) + def _set_code_unit_attributes(self): + pass + + @classmethod + def _is_valid(self, *args, **kwargs): + return False class HaloContainer(YTSelectionContainer): """ From d5b3ffa8fc29661188dd8c195fb5992c9086a540 Mon Sep 17 00:00:00 2001 From: yt-fido Date: Wed, 5 Aug 2020 14:30:12 +0000 Subject: [PATCH 255/653] [format-command] fixes --- yt/frontends/halo_catalog/data_structures.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index a9857220682..133b468e012 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -391,6 +391,7 @@ def _set_code_unit_attributes(self): def _is_valid(self, *args, **kwargs): return False + class HaloContainer(YTSelectionContainer): """ Base class for data containers providing halo particles. From 04f87d399d148f7e9e8cca444a8e3b1bdf0a86f2 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 5 Aug 2020 15:35:12 +0100 Subject: [PATCH 256/653] Fix flake8 error. 
--- yt/frontends/adaptahop/data_structures.py | 1 + yt/frontends/gadget_fof/data_structures.py | 1 + 2 files changed, 2 insertions(+) diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 5c3406ff993..0a29462323a 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -7,6 +7,7 @@ """ +import numpy as np import os import re import stat diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index bfbd6f70fe7..8203e55a3a3 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -14,6 +14,7 @@ from yt.geometry.particle_geometry_handler import ParticleIndex from yt.utilities.cosmology import Cosmology from yt.utilities.logger import ytLogger as mylog +from yt.utilities.on_demand_imports import _h5py as h5py class GadgetFOFParticleIndex(ParticleIndex): From 12be9a240bf81bba7d479f0e9be4ed08b3e21886 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 16:39:59 +0200 Subject: [PATCH 257/653] Remove duplicate line... --- yt/frontends/ramses/particle_handlers.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 258d1b5a507..b7c1e301d24 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -48,8 +48,6 @@ class ParticleFileHandler( _file_type = "particle" - _file_type = "particle" - # These properties are static properties ptype = None # The name to give to the particle type fname = None # The name of the file(s). From 587153e3f086783c9cf716105259ce586128a883 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 5 Aug 2020 15:46:48 +0100 Subject: [PATCH 258/653] Fix import. --- yt/frontends/adaptahop/data_structures.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 0a29462323a..63fde4b8402 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -7,11 +7,12 @@ """ -import numpy as np import os import re import stat +import numpy as np + from yt.data_objects.data_containers import YTSelectionContainer from yt.data_objects.static_output import Dataset from yt.frontends.halo_catalog.data_structures import HaloCatalogFile From 55c130169c4c57aea461cd2b29abe18dbc1c7882 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 16:49:54 +0200 Subject: [PATCH 259/653] Add docstring to mixin --- yt/frontends/ramses/field_handlers.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 2f024ad9a56..9ce72cea54a 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -39,6 +39,10 @@ def __new__(meta, name, bases, class_dict): class HandlerMixin: + """This contains all the shared methods to handle RAMSES files. + + This is not supposed to be user-facing. + """ def setup_handler(self, domain): """ Initalize an instance of the class. 
This automatically sets From 30ba97f4f78163eb36cc53374402a5baaaebbce6 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 16:51:06 +0200 Subject: [PATCH 260/653] Change names --- yt/frontends/ramses/field_handlers.py | 4 ++-- yt/frontends/ramses/particle_handlers.py | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 9ce72cea54a..b4255735349 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -23,7 +23,7 @@ def register_field_handler(ph): DETECTED_FIELDS = {} -class RAMSESFieldFileHandlerRegistry(abc.ABCMeta): +class RegisteredRAMSESFieldFileHandler(abc.ABCMeta): """ This is a base class that on instantiation registers the file handler into the list. Used as a metaclass. @@ -141,7 +141,7 @@ def any_exist(cls, ds): return exists -class FieldFileHandler(abc.ABC, HandlerMixin, metaclass=RAMSESFieldFileHandlerRegistry): +class FieldFileHandler(abc.ABC, HandlerMixin, metaclass=RegisteredRAMSESFieldFileHandler): """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index b7c1e301d24..8cc1dde72d0 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -19,7 +19,7 @@ def register_particle_handler(ph): PARTICLE_HANDLERS.add(ph) -class RAMSESParticleFileHandlerRegistry(abc.ABCMeta): +class RegisteredRAMSESParticleFileHandler(abc.ABCMeta): """ This is a base class that on instantiation registers the file handler into the list. Used as a metaclass. @@ -35,7 +35,7 @@ def __new__(meta, name, bases, class_dict): class ParticleFileHandler( - abc.ABC, HandlerMixin, metaclass=RAMSESParticleFileHandlerRegistry + abc.ABC, HandlerMixin, metaclass=RegisteredRAMSESParticleFileHandler ): """ Abstract class to handle particles in RAMSES. Each instance From c760e6fd2b0b2a2f6cfa9b1384c2a56c602cff61 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 16:52:51 +0200 Subject: [PATCH 261/653] black pass --- yt/frontends/ramses/field_handlers.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index b4255735349..a5cde560633 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -43,6 +43,7 @@ class HandlerMixin: This is not supposed to be user-facing. """ + def setup_handler(self, domain): """ Initalize an instance of the class. This automatically sets @@ -141,7 +142,9 @@ def any_exist(cls, ds): return exists -class FieldFileHandler(abc.ABC, HandlerMixin, metaclass=RegisteredRAMSESFieldFileHandler): +class FieldFileHandler( + abc.ABC, HandlerMixin, metaclass=RegisteredRAMSESFieldFileHandler +): """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). From 226ad06764439da8008ecf1e9491a1e19ac444e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 17:14:44 +0200 Subject: [PATCH 262/653] add missing entries to boxlib frontend api --- yt/frontends/boxlib/api.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/yt/frontends/boxlib/api.py b/yt/frontends/boxlib/api.py index b1bfd3c0aa8..334cc8bb63f 100644 --- a/yt/frontends/boxlib/api.py +++ b/yt/frontends/boxlib/api.py @@ -1,5 +1,7 @@ from . 
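The ``Registered*FileHandler`` metaclasses renamed above implement a register-on-definition pattern: creating a concrete handler class is enough to add it to a module-level registry, so frontends never have to list handlers by hand. A stripped-down, runnable sketch of the pattern (names are illustrative):

import abc

HANDLERS = set()


class RegisteredHandler(abc.ABCMeta):
    # Metaclass: registration happens as a side effect of class creation,
    # as in the RegisteredRAMSES*FileHandler metaclasses above.
    def __new__(meta, name, bases, class_dict):
        cls = super().__new__(meta, name, bases, class_dict)
        if class_dict.get("ptype") is not None:
            HANDLERS.add(cls)
        return cls


class BaseParticleHandler(abc.ABC, metaclass=RegisteredHandler):
    ptype = None  # abstract base class: not registered


class SinkParticleHandler(BaseParticleHandler):
    ptype = "sink"  # concrete handler: registered automatically


print(HANDLERS)  # {<class '__main__.SinkParticleHandler'>}
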
import tests from .data_structures import ( + AMReXDataset, + AMReXHierarchy, BoxlibDataset, BoxlibGrid, BoxlibHierarchy, From f5c42b866239938dcb7ddef5634242609ae399ae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 17:54:47 +0200 Subject: [PATCH 263/653] improve test name Co-authored-by: Matthew Turk --- yt/tests/test_load_errors.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index c1811a98752..603c075664c 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -7,7 +7,7 @@ from yt.utilities.exceptions import YTOutputNotIdentified, YTSimulationNotIdentified -def test_load_unexisting_data(): +def test_load_nonexistent_data(): with tempfile.TemporaryDirectory() as tmpdir: assert_raises(OSError, load, os.path.join(tmpdir, "not_a_file")) assert_raises(OSError, simulation, os.path.join(tmpdir, "not_a_file"), "Enzo") From 36d5b9882795b85503727f8c978927dd3d97106a Mon Sep 17 00:00:00 2001 From: Michael Zingale Date: Wed, 5 Aug 2020 12:09:09 -0400 Subject: [PATCH 264/653] add an example of this functionality to the cookbook --- doc/source/cookbook/complex_plots.rst | 6 ++++++ doc/source/cookbook/multiplot_export_to_mpl.py | 17 +++++++++++++++++ 2 files changed, 23 insertions(+) create mode 100644 doc/source/cookbook/multiplot_export_to_mpl.py diff --git a/doc/source/cookbook/complex_plots.rst b/doc/source/cookbook/complex_plots.rst index 11a1d157c6d..3096e0a7deb 100644 --- a/doc/source/cookbook/complex_plots.rst +++ b/doc/source/cookbook/complex_plots.rst @@ -93,6 +93,12 @@ for more information. .. yt_cookbook:: multiplot_2x2.py +The above example gives you full control over the plots, but for most +purposes, the ``export_to_mpl_figure`` method is a simpler option, +allowing us to make a similar plot as: + +.. 
yt_cookbook:: multiplot_export_to_mpl.py + Multipanel with PhasePlot ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/source/cookbook/multiplot_export_to_mpl.py b/doc/source/cookbook/multiplot_export_to_mpl.py new file mode 100644 index 00000000000..35c54c05cba --- /dev/null +++ b/doc/source/cookbook/multiplot_export_to_mpl.py @@ -0,0 +1,17 @@ +import yt + +ds = yt.load_sample("IsolatedGalaxy") + +fields = ['density', 'velocity_x', 'velocity_y', 'velocity_magnitude'] +p = yt.SlicePlot(ds, 'z', fields) +p.set_log('velocity_x', False) +p.set_log('velocity_y', False) + +# this returns a matplotlib figure with an ImageGrid and the slices +# added to the grid of axes (in this case, 2x2) +fig = p.export_to_mpl_figure((2,2)) + +fig.tight_layout() + +fig.savefig("multiplot_export_to_mpl.png") + From a9a912bbde79c183ca23001ab57b2124347f4d4b Mon Sep 17 00:00:00 2001 From: Michael Zingale Date: Wed, 5 Aug 2020 12:31:49 -0400 Subject: [PATCH 265/653] the exponent in the latex string needs to be in {} to render properly --- yt/visualization/volume_rendering/transfer_functions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index 6983484ebfd..40cd7a64ceb 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -663,7 +663,7 @@ def x_format(x, pos): if abs(val) < 1.0e-3 or abs(val) > 1.0e4: if not val == 0.0: e = np.floor(np.log10(abs(val))) - return r"${:.2f}\times 10^{:d}$".format(val / 10.0 ** e, int(e)) + return r"${:.2f}\times 10^{{ {:d} }}$".format(val / 10.0 ** e, int(e)) else: return r"$0$" else: From 28054e0e094643a9c4ef48c4c6c087a3f2018718 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 18:07:15 +0200 Subject: [PATCH 266/653] refactor: minimal error catching --- yt/frontends/enzo/simulation_handling.py | 17 ++++++++--------- yt/frontends/exodus_ii/simulation_handling.py | 8 ++++---- yt/frontends/gadget/simulation_handling.py | 18 +++++++++--------- 3 files changed, 21 insertions(+), 22 deletions(-) diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index c8c929388c4..9872c475c66 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -658,16 +658,15 @@ def _check_for_outputs(self, potential_outputs): ) try: ds = load(filename) - my_storage.result = { - "filename": filename, - "time": ds.current_time.in_units("s"), - } - if ds.cosmological_simulation: - my_storage.result["redshift"] = ds.current_redshift - except OSError: - pass - except YTOutputNotIdentified: + except (OSError, YTOutputNotIdentified): mylog.error("Failed to load %s", filename) + continue + my_storage.result = { + "filename": filename, + "time": ds.current_time.in_units("s"), + } + if ds.cosmological_simulation: + my_storage.result["redshift"] = ds.current_redshift mylog.setLevel(llevel) my_outputs = [ my_output for my_output in my_outputs.values() if my_output is not None diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index 88bd41b1c06..c8bca75cd54 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -91,11 +91,11 @@ def _check_for_outputs(self, potential_outputs): ): try: ds = load(output) - my_storage.result = {"filename": output, "num_steps": ds.num_steps} - except 
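The transfer-function fix above doubles the braces so that ``str.format`` emits a literal ``{...}`` grouping the exponent; without it, mathtext only superscripts the first character after ``^``. A standalone illustration of the two format strings:

import numpy as np

val = 3.2e-5
e = np.floor(np.log10(abs(val)))

before = r"${:.2f}\times 10^{:d}$".format(val / 10.0 ** e, int(e))
after = r"${:.2f}\times 10^{{ {:d} }}$".format(val / 10.0 ** e, int(e))

print(before)  # $3.20\times 10^-5$    -> mathtext superscripts only the '-'
print(after)   # $3.20\times 10^{ -5 }$ -> the whole exponent is superscripted
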
OSError: - pass - except YTOutputNotIdentified: + except (OSError, YTOutputNotIdentified): mylog.error("Failed to load %s", output) + continue + my_storage.result = {"filename": output, "num_steps": ds.num_steps} + my_outputs = [ my_output for my_output in my_outputs.values() if my_output is not None ] diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 8ebf282a232..2b2a633ed7c 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -523,16 +523,16 @@ def _check_for_outputs(self, potential_outputs): ): try: ds = load(output) - my_storage.result = { - "filename": output, - "time": ds.current_time.in_units("s"), - } - if ds.cosmological_simulation: - my_storage.result["redshift"] = ds.current_redshift - except OSError: - pass - except YTOutputNotIdentified: + except (OSError, YTOutputNotIdentified): mylog.error("Failed to load %s", output) + continue + my_storage.result = { + "filename": output, + "time": ds.current_time.in_units("s"), + } + if ds.cosmological_simulation: + my_storage.result["redshift"] = ds.current_redshift + my_outputs = [ my_output for my_output in my_outputs.values() if my_output is not None ] From 5b87ac3130035fab03204cc351746632e18e3faf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 19:04:34 +0200 Subject: [PATCH 267/653] add a specialized exception for ambiguous input --- yt/convenience.py | 14 +++++++++----- yt/utilities/exceptions.py | 14 ++++++++++++++ 2 files changed, 23 insertions(+), 5 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index 1f5bd4ad421..48267ddc014 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -2,8 +2,11 @@ # Named imports from yt.config import ytcfg -from yt.funcs import mylog -from yt.utilities.exceptions import YTOutputNotIdentified, YTSimulationNotIdentified +from yt.utilities.exceptions import ( + YTAmbiguousDataType, + YTOutputNotIdentified, + YTSimulationNotIdentified, +) from yt.utilities.hierarchy_inspection import find_lowest_subclasses from yt.utilities.parameter_file_storage import ( output_type_registry, @@ -42,6 +45,9 @@ def load(fn, *args, **kwargs): yt.utilities.exceptions.YTOutputNotIdentified If fn matches existing files or directories with undetermined format. + + yt.utilities.exceptions.YTAmbiguousDataType + If the data format matches more than one class of similar specilization levels. 
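The ``_check_for_outputs`` refactor above narrows each ``try`` block to the single ``load`` call and uses ``continue`` on failure, so an exception raised while recording the result is no longer silently swallowed. The shape of the pattern, sketched with stand-in functions rather than yt's real loop (``ValueError`` stands in for ``YTOutputNotIdentified``):

def check_for_outputs(potential_outputs, load, log):
    """Keep the outputs that load; log and skip the ones that don't."""
    results = []
    for output in potential_outputs:
        try:
            ds = load(output)  # only the risky call is guarded
        except (FileNotFoundError, ValueError) as err:
            log(f"Failed to load {output}: {err}")
            continue  # skip this output, keep going
        # exceptions raised from here on propagate, as they should
        results.append({"filename": output, "time": ds["time"]})
    return results


def fake_load(name):
    data = {"DD0045": {"time": 1.0}, "DD0046": {"time": 2.0}}
    if name not in data:
        raise FileNotFoundError(name)
    return data[name]


print(check_for_outputs(["DD0045", "missing", "DD0046"], fake_load, print))
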
""" fn = os.path.expanduser(fn) @@ -73,9 +79,7 @@ def load(fn, *args, **kwargs): return candidates[0](fn, *args, **kwargs) if len(candidates) > 1: - mylog.error("Multiple output type candidates for %s:", fn) - for c in candidates: - mylog.error(" Possible: %s", c) + raise YTAmbiguousDataType(fn, candidates) raise YTOutputNotIdentified(fn, args, kwargs) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 6d582da0df7..e4b77ea848c 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -28,6 +28,20 @@ def __str__(self): return msg +class YTAmbiguousDataType(YTOutputNotIdentified): + def __init__(self, filename, candidates): + self.filename = filename + self.candidates = candidates + + def __str__(self): + msg = f"Multiple data type candidates for {self.filename}\n" + msg += "The following independent classes were detected as valid :\n" + for c in self.candidates: + msg += f"{c}\n" + msg += "Please report this to https://github.com/yt-project/yt/issues/new" + return msg + + class YTSphereTooSmall(YTException): def __init__(self, ds, radius, smallest_cell): YTException.__init__(self, ds=ds) From 039e1a944c4f11acc8e15ac1b3f88cec103e6cbb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 20:33:03 +0200 Subject: [PATCH 268/653] use fstrings in yt.convenience.py --- yt/convenience.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index 48267ddc014..93d530747c7 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -62,9 +62,9 @@ def load(fn, *args, **kwargs): if os.path.exists(alt_fn): fn = alt_fn else: - msg = "No such file or directory: %s" % fn + msg = f"No such file or directory: {fn}" if os.path.exists(data_dir): - msg += "\n(Also tried %s)" % alt_fn + msg += f"\n(Also tried {alt_fn})" raise OSError(msg) candidates = [] @@ -113,7 +113,7 @@ def simulation(fn, simulation_type, find_outputs=False): if os.path.exists(alt_fn): fn = alt_fn else: - raise OSError("No such file or directory: %s" % fn) + raise OSError(f"No such file or directory: {fn}") try: cls = simulation_time_series_registry[simulation_type] From bbbd2b4b08dcb8cd25e4e121851e9ffcb4d1a4c6 Mon Sep 17 00:00:00 2001 From: yt-fido Date: Wed, 5 Aug 2020 18:34:40 +0000 Subject: [PATCH 269/653] [format-command] fixes --- doc/source/cookbook/amrkdtree_downsampling.py | 9 +++++++-- yt/visualization/volume_rendering/transfer_functions.py | 4 +++- 2 files changed, 10 insertions(+), 3 deletions(-) diff --git a/doc/source/cookbook/amrkdtree_downsampling.py b/doc/source/cookbook/amrkdtree_downsampling.py index 8abe9decdcc..6e7970f545b 100644 --- a/doc/source/cookbook/amrkdtree_downsampling.py +++ b/doc/source/cookbook/amrkdtree_downsampling.py @@ -71,8 +71,13 @@ alpha=10.0 * np.ones(4, dtype="float64"), colormap="RdBu_r", ) -tf.add_layers(4, 0.01, col_bounds=[-27.5, -25.5], - alpha=10.0 * np.ones(4, dtype='float64'), colormap='RdBu_r') +tf.add_layers( + 4, + 0.01, + col_bounds=[-27.5, -25.5], + alpha=10.0 * np.ones(4, dtype="float64"), + colormap="RdBu_r", +) sc.save("v4.png", sigma_clip=6.0) # ## This looks pretty good, now lets go back to the full resolution AMRKDTree diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index 40cd7a64ceb..3e42cfedba3 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -663,7 +663,9 @@ def x_format(x, pos): if abs(val) 
< 1.0e-3 or abs(val) > 1.0e4: if not val == 0.0: e = np.floor(np.log10(abs(val))) - return r"${:.2f}\times 10^{{ {:d} }}$".format(val / 10.0 ** e, int(e)) + return r"${:.2f}\times 10^{{ {:d} }}$".format( + val / 10.0 ** e, int(e) + ) else: return r"$0$" else: From 0eca09072e3e7109f55348492600f57974ea8699 Mon Sep 17 00:00:00 2001 From: Michael Zingale Date: Wed, 5 Aug 2020 14:36:37 -0400 Subject: [PATCH 270/653] run black --- yt/visualization/volume_rendering/transfer_functions.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index 40cd7a64ceb..3e42cfedba3 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -663,7 +663,9 @@ def x_format(x, pos): if abs(val) < 1.0e-3 or abs(val) > 1.0e4: if not val == 0.0: e = np.floor(np.log10(abs(val))) - return r"${:.2f}\times 10^{{ {:d} }}$".format(val / 10.0 ** e, int(e)) + return r"${:.2f}\times 10^{{ {:d} }}$".format( + val / 10.0 ** e, int(e) + ) else: return r"$0$" else: From 97f34c9af2af66f190518cfb5784700434c2bc59 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 5 Aug 2020 22:46:27 +0200 Subject: [PATCH 271/653] improve error message with a workaround hint and better formatting --- yt/utilities/exceptions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index e4b77ea848c..ffd2282a395 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -38,6 +38,7 @@ def __str__(self): msg += "The following independent classes were detected as valid :\n" for c in self.candidates: msg += f"{c}\n" + msg += "A possible workaround is to directly instantiate one of the above.\n" msg += "Please report this to https://github.com/yt-project/yt/issues/new" return msg From c01e416685583f7584b70259b465f62433bcc289 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 07:48:30 +0200 Subject: [PATCH 272/653] change error from OSError to FileNotFoundError --- yt/convenience.py | 8 +++---- yt/frontends/enzo/simulation_handling.py | 2 +- yt/frontends/exodus_ii/simulation_handling.py | 2 +- yt/frontends/gadget/simulation_handling.py | 2 +- yt/tests/test_load_errors.py | 8 ++++--- yt/utilities/answer_testing/framework.py | 21 ++++++++----------- yt/utilities/answer_testing/utils.py | 13 ++++++------ 7 files changed, 28 insertions(+), 28 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index 93d530747c7..f7f56e59b82 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -40,7 +40,7 @@ def load(fn, *args, **kwargs): Raises ------ - OSError + FileNotFoundError If fn does not match any existing file or directory. yt.utilities.exceptions.YTOutputNotIdentified @@ -65,7 +65,7 @@ def load(fn, *args, **kwargs): msg = f"No such file or directory: {fn}" if os.path.exists(data_dir): msg += f"\n(Also tried {alt_fn})" - raise OSError(msg) + raise FileNotFoundError(msg) candidates = [] for cls in output_type_registry.values(): @@ -101,7 +101,7 @@ def simulation(fn, simulation_type, find_outputs=False): Raises ------ - OSError + FileNotFoundError If fn is not found. 
yt.utilities.exceptions.YTSimulationNotIdentified @@ -113,7 +113,7 @@ def simulation(fn, simulation_type, find_outputs=False): if os.path.exists(alt_fn): fn = alt_fn else: - raise OSError(f"No such file or directory: {fn}") + raise FileNotFoundError(f"No such file or directory: {fn}") try: cls = simulation_time_series_registry[simulation_type] diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 9872c475c66..a9a53dd8ba7 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -658,7 +658,7 @@ def _check_for_outputs(self, potential_outputs): ) try: ds = load(filename) - except (OSError, YTOutputNotIdentified): + except (FileNotFoundError, YTOutputNotIdentified): mylog.error("Failed to load %s", filename) continue my_storage.result = { diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index c8bca75cd54..a029e987c66 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -91,7 +91,7 @@ def _check_for_outputs(self, potential_outputs): ): try: ds = load(output) - except (OSError, YTOutputNotIdentified): + except (FileNotFoundError, YTOutputNotIdentified): mylog.error("Failed to load %s", output) continue my_storage.result = {"filename": output, "num_steps": ds.num_steps} diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 2b2a633ed7c..fa2092b90a8 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -523,7 +523,7 @@ def _check_for_outputs(self, potential_outputs): ): try: ds = load(output) - except (OSError, YTOutputNotIdentified): + except (FileNotFoundError, YTOutputNotIdentified): mylog.error("Failed to load %s", output) continue my_storage.result = { diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index 603c075664c..6f0f1562a8d 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -9,14 +9,16 @@ def test_load_nonexistent_data(): with tempfile.TemporaryDirectory() as tmpdir: - assert_raises(OSError, load, os.path.join(tmpdir, "not_a_file")) - assert_raises(OSError, simulation, os.path.join(tmpdir, "not_a_file"), "Enzo") + assert_raises(FileNotFoundError, load, os.path.join(tmpdir, "not_a_file")) + assert_raises( + FileNotFoundError, simulation, os.path.join(tmpdir, "not_a_file"), "Enzo" + ) # this one is a design choice: it is preferable to report the most important # problem in an error message (missing data is worse than a typo in # simulation_type), so we make sure the error raised is not YTSimulationNotIdentified assert_raises( - OSError, + FileNotFoundError, simulation, os.path.join(tmpdir, "not_a_file"), "unregistered_simulation_type", diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 33379e0dade..f207cb7715c 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -16,14 +16,11 @@ import zlib from collections import defaultdict -import matplotlib.image as mpimg import numpy as np +from matplotlib import image as mpimg from matplotlib.testing.compare import compare_images from nose.plugins import Plugin -import yt.visualization.particle_plots as particle_plots -import yt.visualization.plot_window as pw -import yt.visualization.profile_plotter as profile_plotter from yt.config import ytcfg from yt.convenience import load, 
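Narrowing ``OSError`` to ``FileNotFoundError`` works both ways here: ``FileNotFoundError`` is a subclass of ``OSError`` (PEP 3151), so raising the more specific type still satisfies any caller that catches ``OSError``, while the ``except`` clauses above stop masking unrelated OS-level failures such as permission errors. A quick check:

print(issubclass(FileNotFoundError, OSError))  # True

try:
    raise FileNotFoundError("No such file or directory: 'not_a_file'")
except OSError as err:  # still caught by the broader class
    print(type(err).__name__, "-", err)
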
simulation from yt.data_objects.static_output import Dataset @@ -36,13 +33,13 @@ assert_rel_equal, ) from yt.utilities.command_line import get_yt_version -from yt.utilities.exceptions import ( - YTCloudError, - YTNoAnswerNameSpecified, - YTNoOldAnswer, - YTOutputNotIdentified, -) +from yt.utilities.exceptions import YTCloudError, YTNoAnswerNameSpecified, YTNoOldAnswer from yt.utilities.logger import disable_stream_logging +from yt.visualization import ( + particle_plots as particle_plots, + plot_window as pw, + profile_plotter as profile_plotter, +) mylog = logging.getLogger("nose.plugins.answer-testing") run_big_data = False @@ -307,7 +304,7 @@ def can_run_ds(ds_fn, file_check=False): return os.path.isfile(os.path.join(path, ds_fn)) and result_storage is not None try: load(ds_fn) - except (OSError, YTOutputNotIdentified): + except FileNotFoundError: if ytcfg.getboolean("yt", "requires_ds_strict"): if result_storage is not None: result_storage["tainted"] = True @@ -327,7 +324,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): return os.path.isfile(os.path.join(path, sim_fn)) and result_storage is not None try: simulation(sim_fn, sim_type) - except (OSError, YTOutputNotIdentified): + except FileNotFoundError: if ytcfg.getboolean("yt", "requires_ds_strict"): if result_storage is not None: result_storage["tainted"] = True diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index 567a6d2c810..1fd64a051cc 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -12,16 +12,17 @@ import pytest import yaml -import yt.visualization.particle_plots as particle_plots -import yt.visualization.plot_window as pw -import yt.visualization.profile_plotter as profile_plotter from yt.config import ytcfg from yt.convenience import load, simulation from yt.data_objects.selection_data_containers import YTRegion from yt.data_objects.static_output import Dataset from yt.frontends.ytdata.api import save_as_dataset from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.exceptions import YTOutputNotIdentified +from yt.visualization import ( + particle_plots as particle_plots, + plot_window as pw, + profile_plotter as profile_plotter, +) from yt.visualization.volume_rendering.scene import Scene @@ -310,7 +311,7 @@ def can_run_ds(ds_fn, file_check=False): try: load(ds_fn) return True - except (OSError, YTOutputNotIdentified): + except FileNotFoundError: return False @@ -326,7 +327,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): return os.path.isfile(os.path.join(path, sim_fn)) try: simulation(sim_fn, sim_type) - except (OSError, YTOutputNotIdentified): + except FileNotFoundError: return False return True From 9e51f575d67321b7f10203c79170d63bd44a22bb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 07:54:27 +0200 Subject: [PATCH 273/653] add a test for ambiguous data detection --- yt/tests/test_load_errors.py | 26 +++++++++++++++++++++++++- 1 file changed, 25 insertions(+), 1 deletion(-) diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index 6f0f1562a8d..1316d8cc00d 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -3,8 +3,13 @@ from pathlib import Path from yt.convenience import load, simulation +from yt.data_objects.static_output import Dataset from yt.testing import assert_raises -from yt.utilities.exceptions import YTOutputNotIdentified, YTSimulationNotIdentified +from yt.utilities.exceptions import ( + 
YTAmbiguousDataType, + YTOutputNotIdentified, + YTSimulationNotIdentified, +) def test_load_nonexistent_data(): @@ -43,3 +48,22 @@ def test_load_unidentified_data(): empty_file_path, "unregistered_simulation_type", ) + + +def test_load_ambiguous_data(): + # we deliberately setup a situation where two Dataset subclasses + # that aren't parents are consisdered valid + class FakeDataset(Dataset): + @classmethod + def _is_valid(cls, *args, **kwargs): + return True + + class FakeDataset2(Dataset): + @classmethod + def _is_valid(cls, *args, **kwargs): + return True + + with tempfile.TemporaryDirectory() as tmpdir: + empty_file_path = Path(tmpdir) / "empty_file" + empty_file_path.touch() + assert_raises(YTAmbiguousDataType, load, tmpdir) From a01c2272ae98f4fb791875f10f7b8b1521a82d37 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 08:44:14 +0200 Subject: [PATCH 274/653] mock std lib error format for FileNotFoundError --- yt/convenience.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index f7f56e59b82..04de6f30d42 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -62,9 +62,9 @@ def load(fn, *args, **kwargs): if os.path.exists(alt_fn): fn = alt_fn else: - msg = f"No such file or directory: {fn}" + msg = f"No such file or directory: '{fn}'" if os.path.exists(data_dir): - msg += f"\n(Also tried {alt_fn})" + msg += f"\n(Also tried '{alt_fn}')" raise FileNotFoundError(msg) candidates = [] @@ -113,7 +113,7 @@ def simulation(fn, simulation_type, find_outputs=False): if os.path.exists(alt_fn): fn = alt_fn else: - raise FileNotFoundError(f"No such file or directory: {fn}") + raise FileNotFoundError(f"No such file or directory: '{fn}'") try: cls = simulation_time_series_registry[simulation_type] From e4100e28f47a58f797028878979ee61a3ad96a75 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 09:43:55 +0100 Subject: [PATCH 275/653] Add support for max_level --- yt/frontends/ramses/data_structures.py | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 30dbda5ee9e..bf13a9002e1 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -134,6 +134,10 @@ def _read_amr_header(self): # Now we're at the tree itself # Now we iterate over each level and each CPU. self.amr_header = hvals + # update levelmax + self.amr_header["nlevelmax"] = min( + self.ds._force_max_level, self.amr_header["nlevelmax"] + ) self.amr_offset = f.tell() self.local_oct_count = hvals["numbl"][ self.ds.min_level :, self.domain_id - 1 @@ -371,7 +375,9 @@ def _initialize_oct_handler(self): total_octs = sum( dom.local_oct_count for dom in self.domains # + dom.ngridbound.sum() ) - self.max_level = max(dom.max_level for dom in self.domains) + self.max_level = min( + self.ds._force_max_level, max(dom.max_level for dom in self.domains) + ) self.num_grids = total_octs def _detect_output_fields(self): @@ -521,6 +527,7 @@ def __init__( extra_particle_fields=None, cosmological=None, bbox=None, + max_level=99999, ): # Here we want to initiate a traceback, if the reader is not built. 
if isinstance(fields, str): @@ -545,6 +552,7 @@ def __init__( self._extra_particle_fields = extra_particle_fields self.force_cosmological = cosmological self._bbox = bbox + self._force_max_level = max_level # Infer if the output is organized in groups root_folder, group_folder = os.path.split(os.path.split(filename)[0]) @@ -723,7 +731,9 @@ def read_rhs(f, cast): self.omega_lambda = rheader["omega_l"] self.omega_matter = rheader["omega_m"] self.hubble_constant = rheader["H0"] / 100.0 # This is H100 - self.max_level = rheader["levelmax"] - self.min_level - 1 + self.max_level = ( + min(self._force_max_level, rheader["levelmax"]) - self.min_level - 1 + ) if self.cosmological_simulation == 0: self.current_time = self.parameters["time"] From 1c94c814887ec79c5fe7208e2390a63a4253e6d0 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 09:48:52 +0100 Subject: [PATCH 276/653] This looks like a bug for me So everybody just follow me \'Cause we need a little controversy \'Cause it feels so empty without me --- yt/frontends/ramses/particle_handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 340b6a413bc..1d7148f24c6 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -351,7 +351,7 @@ def read_header(self): fields = list(self.known_fields) for i in range(self.ds.dimensionality * 2 + 1): - for j in range(self.ds.max_level, self.ds.min_level): + for j in range(self.ds.min_level, self.ds.max_level): fields.append(("particle_prop_%s_%s" % (i, j), "d")) field_offsets = {} From fc5cbc9b0dcb8e9ded68d354481e50d8dd7470a9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 11:52:08 +0200 Subject: [PATCH 277/653] More stringent assumption on force_max_level --- yt/frontends/ramses/data_structures.py | 36 ++++++++++++++++++++++++-- 1 file changed, 34 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index bf13a9002e1..7e58582a30b 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -1,6 +1,7 @@ import os import weakref from collections import defaultdict +from collections.abc import Iterable from glob import glob import numpy as np @@ -527,7 +528,7 @@ def __init__( extra_particle_fields=None, cosmological=None, bbox=None, - max_level=99999, + max_level=None, ): # Here we want to initiate a traceback, if the reader is not built. if isinstance(fields, str): @@ -552,7 +553,6 @@ def __init__( self._extra_particle_fields = extra_particle_fields self.force_cosmological = cosmological self._bbox = bbox - self._force_max_level = max_level # Infer if the output is organized in groups root_folder, group_folder = os.path.split(os.path.split(filename)[0]) @@ -598,6 +598,37 @@ def __init__( self.storage_filename = storage_filename + # Setup max/min level + ok = ( + isinstance(max_level, Iterable) and len(max_level) == 2 + ) or max_level is None + if not ok: + raise RuntimeError( + "Expected `max_level` to be an iterable of length 2 (level, convention) with " + f"convention in 'yt', 'ramses'), got {max_level} instead." 
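The one-line particle handler fix above works because ``range(start, stop)`` is empty whenever ``start >= stop``: with the arguments in the original order the loop never appended any ``particle_prop_*`` field. For example:

max_level, min_level = 6, 2

print(list(range(max_level, min_level)))  # []  -> no fields were ever added
print(list(range(min_level, max_level)))  # [2, 3, 4, 5]
print(list(range(max_level + 1)))         # [0, 1, ..., 6], the form used later
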
+ ) + else: + force_max_level, max_level_convention = max_level + + # Convert level numbering from yt to ramses convention + if max_level_convention == "yt": + force_max_level += self.min_level + 1 + + if force_max_level < 0: + raise RuntimeError( + f"Cannot set `force_max_level` to {force_max_level} as it is a negative value. " + f"Change the value of `max_level` (received {max_level})." + ) + elif force_max_level > self.min_level + self.max_level + 1: + mylog.warning( + "`force_max_level` was set to %s, which is larger than the deepest level (%s). " + "It will have no effect", + force_max_level, + self.min_level + self.max_level + 1, + ) + + self._force_max_level = force_max_level + def create_field_info(self, *args, **kwa): """Extend create_field_info to add the particles types.""" super(RAMSESDataset, self).create_field_info(*args, **kwa) @@ -731,6 +762,7 @@ def read_rhs(f, cast): self.omega_lambda = rheader["omega_l"] self.omega_matter = rheader["omega_m"] self.hubble_constant = rheader["H0"] / 100.0 # This is H100 + self.max_level = ( min(self._force_max_level, rheader["levelmax"]) - self.min_level - 1 ) From d6d64e37495b15107f776535d62e940adec557dd Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 14:59:46 +0200 Subject: [PATCH 278/653] Enforce tuple definition of max_level --- yt/frontends/ramses/data_structures.py | 50 +++++++++++++------------- 1 file changed, 26 insertions(+), 24 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 7e58582a30b..d850c6aede8 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -136,9 +136,11 @@ def _read_amr_header(self): # Now we iterate over each level and each CPU. self.amr_header = hvals # update levelmax - self.amr_header["nlevelmax"] = min( - self.ds._force_max_level, self.amr_header["nlevelmax"] - ) + + max_level, convention = self.ds._force_max_level + if convention == "yt": + max_level += self.ds.min_level + 1 + self.amr_header["nlevelmax"] = min(max_level, self.amr_header["nlevelmax"]) self.amr_offset = f.tell() self.local_oct_count = hvals["numbl"][ self.ds.min_level :, self.domain_id - 1 @@ -376,9 +378,11 @@ def _initialize_oct_handler(self): total_octs = sum( dom.local_oct_count for dom in self.domains # + dom.ngridbound.sum() ) - self.max_level = min( - self.ds._force_max_level, max(dom.max_level for dom in self.domains) - ) + max_level, convention = self.ds._force_max_level + if convention == "yt": + max_level += self.ds.min_level + 1 + + self.max_level = min(max_level, max(dom.max_level for dom in self.domains)) self.num_grids = total_octs def _detect_output_fields(self): @@ -554,6 +558,8 @@ def __init__( self.force_cosmological = cosmological self._bbox = bbox + self.set_max_level(max_level) + # Infer if the output is organized in groups root_folder, group_folder = os.path.split(os.path.split(filename)[0]) @@ -598,9 +604,11 @@ def __init__( self.storage_filename = storage_filename - # Setup max/min level + def set_max_level(self, max_level): ok = ( - isinstance(max_level, Iterable) and len(max_level) == 2 + isinstance(max_level, Iterable) + and len(max_level) == 2 + and max_level[1] in ("yt", "ramses") ) or max_level is None if not ok: raise RuntimeError( @@ -608,26 +616,19 @@ def __init__( f"convention in 'yt', 'ramses'), got {max_level} instead." 
) else: - force_max_level, max_level_convention = max_level + if max_level is None: + force_max_level, convention = (999, "ramses") + else: + force_max_level, convention = max_level # Convert level numbering from yt to ramses convention - if max_level_convention == "yt": - force_max_level += self.min_level + 1 - if force_max_level < 0: raise RuntimeError( f"Cannot set `force_max_level` to {force_max_level} as it is a negative value. " - f"Change the value of `max_level` (received {max_level})." - ) - elif force_max_level > self.min_level + self.max_level + 1: - mylog.warning( - "`force_max_level` was set to %s, which is larger than the deepest level (%s). " - "It will have no effect", - force_max_level, - self.min_level + self.max_level + 1, + f"Change the value of `max_level` (received {max_level[0]})." ) - self._force_max_level = force_max_level + self._force_max_level = (force_max_level, convention) def create_field_info(self, *args, **kwa): """Extend create_field_info to add the particles types.""" @@ -763,9 +764,10 @@ def read_rhs(f, cast): self.omega_matter = rheader["omega_m"] self.hubble_constant = rheader["H0"] / 100.0 # This is H100 - self.max_level = ( - min(self._force_max_level, rheader["levelmax"]) - self.min_level - 1 - ) + max_level, convention = self._force_max_level + if convention == "yt": + max_level += self.min_level + 1 + self.max_level = (min(max_level, rheader["levelmax"]) - self.min_level - 1,) if self.cosmological_simulation == 0: self.current_time = self.parameters["time"] From 52405b322a0a595fc8080b7925d22a149f1905ec Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 15:06:32 +0200 Subject: [PATCH 279/653] Add doc --- doc/source/examining/loading_data.rst | 26 ++++++++++++++++++++++++++ 1 file changed, 26 insertions(+) diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst index fb0224a8607..154ef6dd8e5 100644 --- a/doc/source/examining/loading_data.rst +++ b/doc/source/examining/loading_data.rst @@ -2479,6 +2479,32 @@ It is possible to provide extra arguments to the load function when loading RAMS .. note:: The ``bbox`` feature is only available for datasets using Hilbert ordering. +``max_level`` + This will set the deepest level to be read from file. It + accepts a tuple of length 2 with format (level, convention), + where the convention is either "ramses" or "yt". + + In the "ramses" convention, levels go from 1 (the root grid) + to levelmax, such that the finest cells have a size of ``1/2**levelmax``. + In the "yt" convention, levels are numbered from 0 (the coarsest + uniform grid, equivalement of RAMSES' ``levelmin`` parameter) + to ``max_level``, such that the finest cells are ``2**max_level`` smaller + than the coarsest. + + + .. 
code-block:: python + import yt + + # Assuming the tree is full down to a level 6 + ds_all = yt.load('output_00080/info_00080.txt') + ds_yt = yt.load('output_00080/info_00080.txt', max_level=(2, "yt")) + ds_ramses = yt.load('output_00080/info_00080.txt', max_level=(8, "ramses")) + + any(ds_all.r['index', 'grid_level'] > 2) # True + all(ds_yt.r['index', 'grid_level'] <= 2) # True + all(ds_ramses.r['index', 'grid_level'] <= 2) # True + + Adding custom particle fields ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ From 3ee95d6efb56513247db51f8d8796ca349c8d971 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 15:09:33 +0200 Subject: [PATCH 280/653] Add test --- yt/frontends/ramses/tests/test_outputs.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 2ea59228d45..edad3d5b118 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -561,3 +561,17 @@ def test_field_accession(): ): for field in fields: reg[field] + + +@requires_file(output_00080) +def test_max_level(): + ds = yt.load(output_00080) + + assert any(ds.r["index", "grid_level"] > 2) + + for ds in ( + yt.load(output_00080, max_level=(2, "yt")), + yt.load(output_00080, max_level=(8, "ramses")), + ): + assert all(ds.r["index", "grid_level"] <= 2) + assert any(ds.r["index", "grid_level"] == 2) From 35a8d5b8ff910e42af8d890c9caa67c28fe5cd92 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 15:16:13 +0200 Subject: [PATCH 281/653] Revert "Enforce tuple definition of max_level" This reverts commit 00a537b4cf667ed3e724eb4c804d119d5b4059b7. --- yt/frontends/ramses/data_structures.py | 50 +++++++++++++------------- 1 file changed, 24 insertions(+), 26 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index d850c6aede8..7e58582a30b 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -136,11 +136,9 @@ def _read_amr_header(self): # Now we iterate over each level and each CPU. 
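The conversion used throughout these patches is ``ramses_level = yt_level + ds.min_level + 1``; combined with the docs example above (``levelmin=6``, where yt level 2 corresponds to RAMSES level 8), that implies ``ds.min_level`` is ``levelmin - 1``. A small helper sketching the conversion, illustrative only and not part of yt's API:

def to_ramses_level(level, convention, min_level):
    """Convert a user-facing max level to RAMSES' numbering.

    ``min_level`` plays the role of the dataset's ``min_level`` attribute,
    i.e. RAMSES ``levelmin - 1`` as implied by the docs example above.
    """
    if convention == "ramses":
        return level
    if convention == "yt":
        return level + min_level + 1
    raise ValueError(f"Unknown convention {convention!r}")


# levelmin = 6  ->  min_level = 5
print(to_ramses_level(2, "yt", min_level=5))      # 8, matches the docs example
print(to_ramses_level(8, "ramses", min_level=5))  # 8
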
self.amr_header = hvals # update levelmax - - max_level, convention = self.ds._force_max_level - if convention == "yt": - max_level += self.ds.min_level + 1 - self.amr_header["nlevelmax"] = min(max_level, self.amr_header["nlevelmax"]) + self.amr_header["nlevelmax"] = min( + self.ds._force_max_level, self.amr_header["nlevelmax"] + ) self.amr_offset = f.tell() self.local_oct_count = hvals["numbl"][ self.ds.min_level :, self.domain_id - 1 @@ -378,11 +376,9 @@ def _initialize_oct_handler(self): total_octs = sum( dom.local_oct_count for dom in self.domains # + dom.ngridbound.sum() ) - max_level, convention = self.ds._force_max_level - if convention == "yt": - max_level += self.ds.min_level + 1 - - self.max_level = min(max_level, max(dom.max_level for dom in self.domains)) + self.max_level = min( + self.ds._force_max_level, max(dom.max_level for dom in self.domains) + ) self.num_grids = total_octs def _detect_output_fields(self): @@ -558,8 +554,6 @@ def __init__( self.force_cosmological = cosmological self._bbox = bbox - self.set_max_level(max_level) - # Infer if the output is organized in groups root_folder, group_folder = os.path.split(os.path.split(filename)[0]) @@ -604,11 +598,9 @@ def __init__( self.storage_filename = storage_filename - def set_max_level(self, max_level): + # Setup max/min level ok = ( - isinstance(max_level, Iterable) - and len(max_level) == 2 - and max_level[1] in ("yt", "ramses") + isinstance(max_level, Iterable) and len(max_level) == 2 ) or max_level is None if not ok: raise RuntimeError( @@ -616,19 +608,26 @@ def set_max_level(self, max_level): f"convention in 'yt', 'ramses'), got {max_level} instead." ) else: - if max_level is None: - force_max_level, convention = (999, "ramses") - else: - force_max_level, convention = max_level + force_max_level, max_level_convention = max_level # Convert level numbering from yt to ramses convention + if max_level_convention == "yt": + force_max_level += self.min_level + 1 + if force_max_level < 0: raise RuntimeError( f"Cannot set `force_max_level` to {force_max_level} as it is a negative value. " - f"Change the value of `max_level` (received {max_level[0]})." + f"Change the value of `max_level` (received {max_level})." + ) + elif force_max_level > self.min_level + self.max_level + 1: + mylog.warning( + "`force_max_level` was set to %s, which is larger than the deepest level (%s). 
" + "It will have no effect", + force_max_level, + self.min_level + self.max_level + 1, ) - self._force_max_level = (force_max_level, convention) + self._force_max_level = force_max_level def create_field_info(self, *args, **kwa): """Extend create_field_info to add the particles types.""" @@ -764,10 +763,9 @@ def read_rhs(f, cast): self.omega_matter = rheader["omega_m"] self.hubble_constant = rheader["H0"] / 100.0 # This is H100 - max_level, convention = self._force_max_level - if convention == "yt": - max_level += self.min_level + 1 - self.max_level = (min(max_level, rheader["levelmax"]) - self.min_level - 1,) + self.max_level = ( + min(self._force_max_level, rheader["levelmax"]) - self.min_level - 1 + ) if self.cosmological_simulation == 0: self.current_time = self.parameters["time"] From 196c4693264083b300fa25411bfe707b6f3cb4ba Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 15:30:07 +0200 Subject: [PATCH 282/653] Now only accepting tuples --- yt/frontends/ramses/data_structures.py | 57 +++++++++++------------ yt/frontends/ramses/tests/test_outputs.py | 18 +++++++ 2 files changed, 44 insertions(+), 31 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 7e58582a30b..ba713dedb7c 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -136,8 +136,11 @@ def _read_amr_header(self): # Now we iterate over each level and each CPU. self.amr_header = hvals # update levelmax + force_max_level, convention = self.ds._force_max_level + if convention == "yt": + force_max_level += self.ds.min_level + 1 self.amr_header["nlevelmax"] = min( - self.ds._force_max_level, self.amr_header["nlevelmax"] + force_max_level, self.amr_header["nlevelmax"] ) self.amr_offset = f.tell() self.local_oct_count = hvals["numbl"][ @@ -376,8 +379,11 @@ def _initialize_oct_handler(self): total_octs = sum( dom.local_oct_count for dom in self.domains # + dom.ngridbound.sum() ) + force_max_level, convention = self.ds._force_max_level + if convention == "yt": + force_max_level += self.ds.min_level + 1 self.max_level = min( - self.ds._force_max_level, max(dom.max_level for dom in self.domains) + force_max_level, max(dom.max_level for dom in self.domains) ) self.num_grids = total_octs @@ -554,6 +560,8 @@ def __init__( self.force_cosmological = cosmological self._bbox = bbox + self._set_max_level(max_level) + # Infer if the output is organized in groups root_folder, group_folder = os.path.split(os.path.split(filename)[0]) @@ -598,37 +606,21 @@ def __init__( self.storage_filename = storage_filename - # Setup max/min level - ok = ( - isinstance(max_level, Iterable) and len(max_level) == 2 - ) or max_level is None - if not ok: - raise RuntimeError( - "Expected `max_level` to be an iterable of length 2 (level, convention) with " - f"convention in 'yt', 'ramses'), got {max_level} instead." - ) - else: - force_max_level, max_level_convention = max_level - - # Convert level numbering from yt to ramses convention - if max_level_convention == "yt": - force_max_level += self.min_level + 1 - - if force_max_level < 0: + def _set_max_level(self, max_level): + max_level = max_level if max_level else (999, "yt") + try: + lvl, convention = max_level + assert lvl >= 0 + assert convention in ("yt", "ramses") + except Exception: raise RuntimeError( - f"Cannot set `force_max_level` to {force_max_level} as it is a negative value. " - f"Change the value of `max_level` (received {max_level})." 
+ "Expected `max_level` to be of the form (level, convention) " + f"with convention either 'yt' or 'ramses'. Got {max_level} " + "instead." ) - elif force_max_level > self.min_level + self.max_level + 1: - mylog.warning( - "`force_max_level` was set to %s, which is larger than the deepest level (%s). " - "It will have no effect", - force_max_level, - self.min_level + self.max_level + 1, - ) - - self._force_max_level = force_max_level + self._force_max_level = max_level + def create_field_info(self, *args, **kwa): """Extend create_field_info to add the particles types.""" super(RAMSESDataset, self).create_field_info(*args, **kwa) @@ -763,8 +755,11 @@ def read_rhs(f, cast): self.omega_matter = rheader["omega_m"] self.hubble_constant = rheader["H0"] / 100.0 # This is H100 + force_max_level, convention = self._force_max_level + if convention == "yt": + force_max_level += self.min_level + 1 self.max_level = ( - min(self._force_max_level, rheader["levelmax"]) - self.min_level - 1 + min(force_max_level, rheader["levelmax"]) - self.min_level - 1 ) if self.cosmological_simulation == 0: diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index edad3d5b118..af3ab0c4030 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -8,6 +8,7 @@ from yt.frontends.ramses.field_handlers import DETECTED_FIELDS, HydroFieldFileHandler from yt.testing import ( assert_equal, + assert_raises, requires_file, requires_module, units_override_check, @@ -569,9 +570,26 @@ def test_max_level(): assert any(ds.r["index", "grid_level"] > 2) + # Should work for ds in ( yt.load(output_00080, max_level=(2, "yt")), yt.load(output_00080, max_level=(8, "ramses")), ): assert all(ds.r["index", "grid_level"] <= 2) assert any(ds.r["index", "grid_level"] == 2) + + +@requires_file(ramses_new_format) +def test_invalid_max_level(): + # Should fail + invalid_args = ( + "invalid", + 2, + (2, "invalid"), + (1, 2, 3, "invalid"), + ("yt", 1), + (-1, "yt"), + ) + for max_level_arg in invalid_args: + with assert_raises(RuntimeError): + yt.load(output_00080, max_level=max_level_arg) \ No newline at end of file From 927d8f8f7aa7db881da7f74f33d2c97c3dc3d30f Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 15:30:25 +0200 Subject: [PATCH 283/653] Men in black --- yt/frontends/ramses/data_structures.py | 5 +---- yt/frontends/ramses/tests/test_outputs.py | 2 +- 2 files changed, 2 insertions(+), 5 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index ba713dedb7c..8ee8f562e49 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -620,7 +620,6 @@ def _set_max_level(self, max_level): ) self._force_max_level = max_level - def create_field_info(self, *args, **kwa): """Extend create_field_info to add the particles types.""" super(RAMSESDataset, self).create_field_info(*args, **kwa) @@ -758,9 +757,7 @@ def read_rhs(f, cast): force_max_level, convention = self._force_max_level if convention == "yt": force_max_level += self.min_level + 1 - self.max_level = ( - min(force_max_level, rheader["levelmax"]) - self.min_level - 1 - ) + self.max_level = min(force_max_level, rheader["levelmax"]) - self.min_level - 1 if self.cosmological_simulation == 0: self.current_time = self.parameters["time"] diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index af3ab0c4030..8c7c2699572 100644 --- 
a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -592,4 +592,4 @@ def test_invalid_max_level(): ) for max_level_arg in invalid_args: with assert_raises(RuntimeError): - yt.load(output_00080, max_level=max_level_arg) \ No newline at end of file + yt.load(output_00080, max_level=max_level_arg) From 0e1ba5c422ae851d06b8d4d8a9366624e51b1cc2 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 15:47:58 +0200 Subject: [PATCH 284/653] Fix broken test ?! --- yt/frontends/ramses/particle_handlers.py | 5 +++-- yt/frontends/ramses/tests/test_outputs.py | 6 +++--- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 1d7148f24c6..d5a5705f650 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -350,9 +350,10 @@ def read_header(self): else: fields = list(self.known_fields) + # Note: this follows RAMSES convention. for i in range(self.ds.dimensionality * 2 + 1): - for j in range(self.ds.min_level, self.ds.max_level): - fields.append(("particle_prop_%s_%s" % (i, j), "d")) + for ilvl in range(self.ds.max_level+1): + fields.append(("particle_prop_%s_%s" % (ilvl, i), "d")) field_offsets = {} _pfields = {} diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 8c7c2699572..82818a5c4e9 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -222,9 +222,9 @@ def test_ramses_sink(): "particle_prop_0_1", "particle_prop_0_2", "particle_prop_0_3", - "particle_prop_1_0", - "particle_prop_1_1", - "particle_prop_1_2", + "particle_prop_0_4", + "particle_prop_0_5", + "particle_prop_0_6", "particle_velocity_x", "particle_velocity_y", "particle_velocity_z", From fc3c0e0c01a3892690de1dfb539fe23b9525dc12 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 16:02:51 +0200 Subject: [PATCH 285/653] Refactor testing of convention --- doc/source/examining/loading_data.rst | 22 ++++++------- yt/frontends/ramses/data_structures.py | 40 +++++++++++++++-------- yt/frontends/ramses/particle_handlers.py | 2 +- yt/frontends/ramses/tests/test_outputs.py | 21 ++++++------ 4 files changed, 49 insertions(+), 36 deletions(-) diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst index 154ef6dd8e5..218c47c8e53 100644 --- a/doc/source/examining/loading_data.rst +++ b/doc/source/examining/loading_data.rst @@ -2479,26 +2479,26 @@ It is possible to provide extra arguments to the load function when loading RAMS .. note:: The ``bbox`` feature is only available for datasets using Hilbert ordering. -``max_level`` - This will set the deepest level to be read from file. It - accepts a tuple of length 2 with format (level, convention), - where the convention is either "ramses" or "yt". + +``max_level, max_level_convention`` + This will set the deepest level to be read from file. Both arguments + have to be set, where the convention can be either "ramses" or "yt". In the "ramses" convention, levels go from 1 (the root grid) - to levelmax, such that the finest cells have a size of ``1/2**levelmax``. + to levelmax, such that the finest cells have a size of ``boxsize/2**levelmax``. 
In the "yt" convention, levels are numbered from 0 (the coarsest - uniform grid, equivalement of RAMSES' ``levelmin`` parameter) - to ``max_level``, such that the finest cells are ``2**max_level`` smaller - than the coarsest. + uniform grid at RAMSES' ``levelmin``) to ``max_level``, such that + the finest cells are ``2**max_level`` smaller than the coarsest. .. code-block:: python import yt - # Assuming the tree is full down to a level 6 + # Assuming RAMSES' levelmin=6, i.e. the structure is full + # down to levelmin=6 ds_all = yt.load('output_00080/info_00080.txt') - ds_yt = yt.load('output_00080/info_00080.txt', max_level=(2, "yt")) - ds_ramses = yt.load('output_00080/info_00080.txt', max_level=(8, "ramses")) + ds_yt = yt.load('output_00080/info_00080.txt', max_level=2, max_level_convention="yt") + ds_ramses = yt.load('output_00080/info_00080.txt', max_level=8, max_level_convention="ramses") any(ds_all.r['index', 'grid_level'] > 2) # True all(ds_yt.r['index', 'grid_level'] <= 2) # True diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 8ee8f562e49..9ae301a5de8 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -1,7 +1,6 @@ import os import weakref from collections import defaultdict -from collections.abc import Iterable from glob import glob import numpy as np @@ -535,6 +534,7 @@ def __init__( cosmological=None, bbox=None, max_level=None, + max_level_convention=None, ): # Here we want to initiate a traceback, if the reader is not built. if isinstance(fields, str): @@ -560,7 +560,9 @@ def __init__( self.force_cosmological = cosmological self._bbox = bbox - self._set_max_level(max_level) + self._force_max_level = self._sanitize_max_level( + max_level, max_level_convention + ) # Infer if the output is organized in groups root_folder, group_folder = os.path.split(os.path.split(filename)[0]) @@ -606,19 +608,29 @@ def __init__( self.storage_filename = storage_filename - def _set_max_level(self, max_level): - max_level = max_level if max_level else (999, "yt") - try: - lvl, convention = max_level - assert lvl >= 0 - assert convention in ("yt", "ramses") - except Exception: - raise RuntimeError( - "Expected `max_level` to be of the form (level, convention) " - f"with convention either 'yt' or 'ramses'. Got {max_level} " - "instead." + def _sanitize_max_level(self, max_level, max_level_convention): + if max_level is None and max_level_convention is None: + return (999, None) + + # Check max_level is a valid, positive integer + if not isinstance(max_level, int) or max_level < 0: + raise ValueError( + f"Expected `max_level` to be a positive integer, got {max_level} " + f"with type {type(max_level)} instead." + ) + + # Check max_level_convention is set and acceptable + if max_level_convention is None: + raise ValueError( + "You specified `max_level` without specifying any `max_level_convention`. " + "You have to pick either 'yt' or 'ramses'." + ) + elif max_level_convention not in ("ramses", "yt"): + raise ValueError( + f"Invalid convention {max_level_convention}. " + "Valid choices are 'yt' and 'ramses'." 
) - self._force_max_level = max_level + return (max_level, max_level_convention) def create_field_info(self, *args, **kwa): """Extend create_field_info to add the particles types.""" diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index d5a5705f650..bb19f490643 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -352,7 +352,7 @@ def read_header(self): # Note: this follows RAMSES convention. for i in range(self.ds.dimensionality * 2 + 1): - for ilvl in range(self.ds.max_level+1): + for ilvl in range(self.ds.max_level + 1): fields.append(("particle_prop_%s_%s" % (ilvl, i), "d")) field_offsets = {} diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 82818a5c4e9..542e175b605 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -572,8 +572,8 @@ def test_max_level(): # Should work for ds in ( - yt.load(output_00080, max_level=(2, "yt")), - yt.load(output_00080, max_level=(8, "ramses")), + yt.load(output_00080, max_level=2, max_level_convention="yt"), + yt.load(output_00080, max_level=8, max_level_convention="ramses"), ): assert all(ds.r["index", "grid_level"] <= 2) assert any(ds.r["index", "grid_level"] == 2) @@ -583,13 +583,14 @@ def test_max_level(): def test_invalid_max_level(): # Should fail invalid_args = ( - "invalid", - 2, - (2, "invalid"), - (1, 2, 3, "invalid"), - ("yt", 1), + (1, None), # you have to set the convention + # invalid conventions + (1, "foo"), + (1, "bar"), + # invalid max_level (-1, "yt"), + ("foo", "yt"), ) - for max_level_arg in invalid_args: - with assert_raises(RuntimeError): - yt.load(output_00080, max_level=max_level_arg) + for lvl, convention in invalid_args: + with assert_raises(ValueError): + yt.load(output_00080, max_level=lvl, max_level_convention=convention) From e7d771b2fb26abecd9f67535a02e2246dddda876 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 16:26:29 +0200 Subject: [PATCH 286/653] Raise more specific errors --- yt/frontends/ramses/data_structures.py | 13 +++++++++---- yt/frontends/ramses/tests/test_outputs.py | 19 ++++++++++++------- 2 files changed, 21 insertions(+), 11 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 9ae301a5de8..9dafc6197c3 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -610,14 +610,19 @@ def __init__( def _sanitize_max_level(self, max_level, max_level_convention): if max_level is None and max_level_convention is None: - return (999, None) + return (999, "yt") # Check max_level is a valid, positive integer - if not isinstance(max_level, int) or max_level < 0: - raise ValueError( + if not isinstance(max_level, (int, np.integer)): + raise TypeError( f"Expected `max_level` to be a positive integer, got {max_level} " f"with type {type(max_level)} instead." ) + if max_level < 0: + raise ValueError( + f"Expected `max_level` to be a positive integer, got {max_level} " + "instead." + ) # Check max_level_convention is set and acceptable if max_level_convention is None: @@ -625,7 +630,7 @@ def _sanitize_max_level(self, max_level, max_level_convention): "You specified `max_level` without specifying any `max_level_convention`. " "You have to pick either 'yt' or 'ramses'." 
) - elif max_level_convention not in ("ramses", "yt"): + if max_level_convention not in ("ramses", "yt"): raise ValueError( f"Invalid convention {max_level_convention}. " "Valid choices are 'yt' and 'ramses'." diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 542e175b605..99f86723069 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -581,16 +581,21 @@ def test_max_level(): @requires_file(ramses_new_format) def test_invalid_max_level(): - # Should fail - invalid_args = ( - (1, None), # you have to set the convention - # invalid conventions + invalid_value_args = ( + (1, None), (1, "foo"), (1, "bar"), - # invalid max_level (-1, "yt"), - ("foo", "yt"), ) - for lvl, convention in invalid_args: + for lvl, convention in invalid_value_args: with assert_raises(ValueError): yt.load(output_00080, max_level=lvl, max_level_convention=convention) + + invalid_type_args = ( + (1.0, "yt"), # not an int + ("invalid", "yt"), + ) + # Should fail with value errors + for lvl, convention in invalid_type_args: + with assert_raises(TypeError): + yt.load(output_00080, max_level=lvl, max_level_convention=convention) From d18a2a8f8fa9e430298a68daff186cb00749fcaf Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 16:28:17 +0200 Subject: [PATCH 287/653] Add informative comment --- yt/frontends/ramses/data_structures.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 9dafc6197c3..013218f9453 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -609,6 +609,10 @@ def __init__( self.storage_filename = storage_filename def _sanitize_max_level(self, max_level, max_level_convention): + # NOTE: the initialisation of the dataset class requires + # sets self.min_level _and_ requires force_max_level + # to be set, so we cannot convert from to yt/ramses + # conventions if max_level is None and max_level_convention is None: return (999, "yt") From 254787be371505b36f2a92be8c12272b04612c28 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 16:29:42 +0200 Subject: [PATCH 288/653] Fix typo --- yt/frontends/ramses/data_structures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 013218f9453..c2381ccb60e 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -609,8 +609,8 @@ def __init__( self.storage_filename = storage_filename def _sanitize_max_level(self, max_level, max_level_convention): - # NOTE: the initialisation of the dataset class requires - # sets self.min_level _and_ requires force_max_level + # NOTE: the initialisation of the dataset class sets + # self.min_level _and_ requires force_max_level # to be set, so we cannot convert from to yt/ramses # conventions if max_level is None and max_level_convention is None: From b9fea02e84e09792c0c1e832443e3827513ae7e1 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 16:51:59 +0200 Subject: [PATCH 289/653] Use class method --- yt/frontends/ramses/data_structures.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index c2381ccb60e..c943fab859b 100644 --- a/yt/frontends/ramses/data_structures.py +++ 
b/yt/frontends/ramses/data_structures.py @@ -608,7 +608,8 @@ def __init__( self.storage_filename = storage_filename - def _sanitize_max_level(self, max_level, max_level_convention): + @classmethod + def _sanitize_max_level(cls, max_level, max_level_convention): # NOTE: the initialisation of the dataset class sets # self.min_level _and_ requires force_max_level # to be set, so we cannot convert from to yt/ramses From ab8c813ef1ead1ec90c6041b469f5f3953cf01b4 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Aug 2020 16:56:36 +0200 Subject: [PATCH 290/653] Static method, not class method --- yt/frontends/ramses/data_structures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index c943fab859b..2cfb86eb337 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -608,8 +608,8 @@ def __init__( self.storage_filename = storage_filename - @classmethod - def _sanitize_max_level(cls, max_level, max_level_convention): + @staticmethod + def _sanitize_max_level(max_level, max_level_convention): # NOTE: the initialisation of the dataset class sets # self.min_level _and_ requires force_max_level # to be set, so we cannot convert from to yt/ramses From 759281f4aaadfc28db6d17f6dcf1b914379b01bc Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 6 Aug 2020 09:56:49 +0200 Subject: [PATCH 291/653] Go crazy with fallback max_level - this should not overflow now --- yt/frontends/ramses/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 2cfb86eb337..1a82bb45dac 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -615,7 +615,7 @@ def _sanitize_max_level(max_level, max_level_convention): # to be set, so we cannot convert from to yt/ramses # conventions if max_level is None and max_level_convention is None: - return (999, "yt") + return (2**999, "yt") # Check max_level is a valid, positive integer if not isinstance(max_level, (int, np.integer)): From 7c38a6e32e27ef621c113c11328562f46b3efe0b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 10:41:23 +0200 Subject: [PATCH 292/653] merge strings Co-authored-by: Corentin Cadiou --- yt/data_objects/static_output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index bd630f8b36a..6b50cf10f73 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1359,7 +1359,7 @@ def add_field(self, name, function, sampling_type, **kwargs): # Handle the case where the field has already been added. if not override and name in self.field_info: mylog.warning( - "Field %s already exists. To override use " + "force_override=True.", + "Field %s already exists. 
To override use `force_override=True`.", name, ) From ea0a9b6b183cb646d235d8e4de660375f34027af Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 10:41:41 +0200 Subject: [PATCH 293/653] merge strings Co-authored-by: Corentin Cadiou --- yt/fields/local_fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/local_fields.py b/yt/fields/local_fields.py index 644a1e1289b..bf9d389a2cd 100644 --- a/yt/fields/local_fields.py +++ b/yt/fields/local_fields.py @@ -23,7 +23,7 @@ def add_field(self, name, function, sampling_type, **kwargs): # Handle the case where the field has already been added. if not override and name in self: mylog.warning( - "Field %s already exists. To override use force_override=True.", name, + "Field %s already exists. To override use `force_override=True`.", name, ) return super(LocalFieldInfoContainer, self).add_field( From 58b606d02aa5f1326c59a7e9ff20b55ceaf63915 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 10:42:08 +0200 Subject: [PATCH 294/653] More helpful error message Co-authored-by: Corentin Cadiou --- yt/fields/field_info_container.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index d55dc70a7e1..a16790e7941 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -273,8 +273,13 @@ def _sanitize_sampling_type(sampling_type, particle_type=None): except AttributeError as e: raise TypeError("sampling_type should be a string.") from e - if sampling_type not in ("cell", "particle", "local"): - raise ValueError + acceptable_samplings =("cell", "particle", "local") + if sampling_type not in acceptable_samplings: + raise ValueError( + "Invalid sampling type %s. Valid sampling types are %s", + sampling_type, + ", ".join(acceptable_samplings) + ) if particle_type: issue_deprecation_warning( From 24323b4e554a3ae2d649b9c46f0737a7fb4166bc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 10:42:43 +0200 Subject: [PATCH 295/653] put import statement at the module top level --- yt/data_objects/static_output.py | 9 +++------ 1 file changed, 3 insertions(+), 6 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 6b50cf10f73..5cf172b40a1 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -36,7 +36,7 @@ SpectralCubeCoordinateHandler, SphericalCoordinateHandler, ) -from yt.units import UnitContainer, _wrap_display_ytarray +from yt.units import UnitContainer, _wrap_display_ytarray, dimensions from yt.units.dimensions import current_mks from yt.units.unit_object import Unit, define_unit from yt.units.unit_registry import UnitRegistry @@ -1059,7 +1059,6 @@ def _assign_unit_system(self, unit_system): self.unit_registry.unit_system = self.unit_system def _create_unit_registry(self, unit_system): - from yt.units import dimensions as dimensions # yt assumes a CGS unit system by default (for back compat reasons). # Since unyt is MKS by default we specify the MKS values of the base @@ -1090,7 +1089,6 @@ def set_units(self): Creates the unit registry for this dataset. """ - from yt.units.dimensions import length if getattr(self, "cosmological_simulation", False): # this dataset is cosmological, so add cosmological units. 
@@ -1102,7 +1100,7 @@ def set_units(self): self.unit_registry.add( new_unit, my_u.base_value / (1 + self.current_redshift), - length, + dimensions.length, "\\rm{%s}/(1+z)" % my_unit, prefixable=True, ) @@ -1359,8 +1357,7 @@ def add_field(self, name, function, sampling_type, **kwargs): # Handle the case where the field has already been added. if not override and name in self.field_info: mylog.warning( - "Field %s already exists. To override use `force_override=True`.", - name, + "Field %s already exists. To override use `force_override=True`.", name, ) self.field_info.add_field(name, function, sampling_type, **kwargs) From 56593163e8e7dc5fc8340ace8ba1e8c4c8731d0d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 10:51:45 +0200 Subject: [PATCH 296/653] blackening --- yt/fields/field_info_container.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index a16790e7941..28dd3461277 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -273,12 +273,12 @@ def _sanitize_sampling_type(sampling_type, particle_type=None): except AttributeError as e: raise TypeError("sampling_type should be a string.") from e - acceptable_samplings =("cell", "particle", "local") + acceptable_samplings = ("cell", "particle", "local") if sampling_type not in acceptable_samplings: raise ValueError( "Invalid sampling type %s. Valid sampling types are %s", sampling_type, - ", ".join(acceptable_samplings) + ", ".join(acceptable_samplings), ) if particle_type: From 6f33fd6fb560e7a2ab1f22b19768dedc76569c58 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 6 Aug 2020 11:22:36 +0200 Subject: [PATCH 297/653] black pass --- yt/frontends/ramses/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 1a82bb45dac..d8025a54372 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -615,7 +615,7 @@ def _sanitize_max_level(max_level, max_level_convention): # to be set, so we cannot convert from to yt/ramses # conventions if max_level is None and max_level_convention is None: - return (2**999, "yt") + return (2 ** 999, "yt") # Check max_level is a valid, positive integer if not isinstance(max_level, (int, np.integer)): From 428cdbebe096cf642bbc426c55f37c2d08e2d79b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 11:25:57 +0200 Subject: [PATCH 298/653] add inline comment Co-authored-by: Corentin Cadiou --- yt/convenience.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/convenience.py b/yt/convenience.py index 04de6f30d42..d895c047cf1 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -56,6 +56,7 @@ def load(fn, *args, **kwargs): return DatasetSeries(fn, *args, **kwargs) + # Unless the dataset starts with http, look for it using the path or relative to the data dir (in this order). 
if not (os.path.exists(fn) or fn.startswith("http")): data_dir = ytcfg.get("yt", "test_data_dir") alt_fn = os.path.join(data_dir, fn) From e44ef752af837b24d44d000a6c47b1041e03d878 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 11:26:33 +0200 Subject: [PATCH 299/653] simplify import statements Co-authored-by: Corentin Cadiou --- yt/utilities/answer_testing/utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index 1fd64a051cc..13b3e46bbc5 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -19,9 +19,9 @@ from yt.frontends.ytdata.api import save_as_dataset from yt.units.yt_array import YTArray, YTQuantity from yt.visualization import ( - particle_plots as particle_plots, + particle_plots, plot_window as pw, - profile_plotter as profile_plotter, + profile_plotter, ) from yt.visualization.volume_rendering.scene import Scene From 6ad15dc11c495f6456fbd643a2f0cebb6efeab8b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 11:30:31 +0200 Subject: [PATCH 300/653] error message polishing Co-authored-by: Corentin Cadiou --- yt/utilities/exceptions.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index ffd2282a395..262d0165d5b 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -25,6 +25,7 @@ def __str__(self): msg += ", %s" % self.args if self.kwargs is not None: msg += ", %s" % self.kwargs + msg += '.' return msg From 88dd2f0e3af45b0e157d7789053a69e74563dca6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 11:31:49 +0200 Subject: [PATCH 301/653] error message polishing Co-authored-by: Corentin Cadiou --- yt/convenience.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index d895c047cf1..2b1516f35a1 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -63,9 +63,9 @@ def load(fn, *args, **kwargs): if os.path.exists(alt_fn): fn = alt_fn else: - msg = f"No such file or directory: '{fn}'" + msg = f"No such file or directory: '{fn}'." if os.path.exists(data_dir): - msg += f"\n(Also tried '{alt_fn}')" + msg += f"\n(Also tried '{alt_fn}')." 
raise FileNotFoundError(msg) candidates = [] From cc7a2d7577914d13fb7cd7103b30c2711187d2c0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 11:37:27 +0200 Subject: [PATCH 302/653] add a tear down stage to test --- yt/tests/test_load_errors.py | 16 ++++++++++++---- 1 file changed, 12 insertions(+), 4 deletions(-) diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index 1316d8cc00d..bae9c317575 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -10,6 +10,7 @@ YTOutputNotIdentified, YTSimulationNotIdentified, ) +from yt.utilities.parameter_file_storage import output_type_registry def test_load_nonexistent_data(): @@ -63,7 +64,14 @@ class FakeDataset2(Dataset): def _is_valid(cls, *args, **kwargs): return True - with tempfile.TemporaryDirectory() as tmpdir: - empty_file_path = Path(tmpdir) / "empty_file" - empty_file_path.touch() - assert_raises(YTAmbiguousDataType, load, tmpdir) + try: + with tempfile.TemporaryDirectory() as tmpdir: + empty_file_path = Path(tmpdir) / "empty_file" + empty_file_path.touch() + assert_raises(YTAmbiguousDataType, load, tmpdir) + except Exception: + raise + finally: + # tear down to avoid possible breakage in following tests + output_type_registry.pop("FakeDataset") + output_type_registry.pop("FakeDataset2") From 2d9d84e50323f8a8c22074b27615df48b9b44620 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 11:54:19 +0200 Subject: [PATCH 303/653] formatting --- yt/utilities/answer_testing/utils.py | 6 +----- yt/utilities/exceptions.py | 2 +- 2 files changed, 2 insertions(+), 6 deletions(-) diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index 13b3e46bbc5..cbb6979b5bc 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -18,11 +18,7 @@ from yt.data_objects.static_output import Dataset from yt.frontends.ytdata.api import save_as_dataset from yt.units.yt_array import YTArray, YTQuantity -from yt.visualization import ( - particle_plots, - plot_window as pw, - profile_plotter, -) +from yt.visualization import particle_plots, plot_window as pw, profile_plotter from yt.visualization.volume_rendering.scene import Scene diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 262d0165d5b..2678be19c19 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -25,7 +25,7 @@ def __str__(self): msg += ", %s" % self.args if self.kwargs is not None: msg += ", %s" % self.kwargs - msg += '.' + msg += "." return msg From 0dd2926c1f985d78692e6f46ca05e2073828d5fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 12:00:16 +0200 Subject: [PATCH 304/653] cleanup test --- yt/tests/test_load_errors.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index bae9c317575..7990711e27e 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -66,8 +66,6 @@ def _is_valid(cls, *args, **kwargs): try: with tempfile.TemporaryDirectory() as tmpdir: - empty_file_path = Path(tmpdir) / "empty_file" - empty_file_path.touch() assert_raises(YTAmbiguousDataType, load, tmpdir) except Exception: raise From 698879237f31a866defd50cbf8abf247e77ed8df Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 6 Aug 2020 11:55:28 +0100 Subject: [PATCH 305/653] Replace duplicate function. To be refactored eventually. 
--- yt/frontends/halo_catalog/io.py | 55 ++------------------------------- 1 file changed, 3 insertions(+), 52 deletions(-) diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index aa50c6d5045..7703fb33454 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -2,6 +2,7 @@ import numpy as np +from yt.frontends.gadget_fof.io import IOHandlerGadgetFOFHaloHDF5 from yt.funcs import parse_h5_attr from yt.units.yt_array import uvstack from yt.utilities.io_handler import BaseIOHandler @@ -113,58 +114,8 @@ def _read_particle_fields(self, dobj, ptf): for field, field_data in all_data.items(): yield field, field_data - def _read_particle_selection(self, dobj, fields): - rv = {} - ind = {} - # We first need a set of masks for each particle type - ptf = defaultdict(list) # ON-DISK TO READ - fsize = defaultdict(lambda: 0) # COUNT RV - field_maps = defaultdict(list) # ptypes -> fields - unions = self.ds.particle_unions - # What we need is a mapping from particle types to return types - for field in fields: - ftype, fname = field - fsize[field] = 0 - # We should add a check for p.fparticle_unions or something here - if ftype in unions: - for pt in unions[ftype]: - ptf[pt].append(fname) - field_maps[pt, fname].append(field) - else: - ptf[ftype].append(fname) - field_maps[field].append(field) - - # Now we allocate - psize = {dobj.ptype: dobj.particle_number} - for field in fields: - if field[0] in unions: - for pt in unions[field[0]]: - fsize[field] += psize.get(pt, 0) - else: - fsize[field] += psize.get(field[0], 0) - for field in fields: - if field[1] in self._vector_fields: - shape = (fsize[field], self._vector_fields[field[1]]) - elif field[1] in self._array_fields: - shape = (fsize[field],) + self._array_fields[field[1]] - elif field in self.ds.scalar_field_list: - shape = (1,) - else: - shape = (fsize[field],) - rv[field] = np.empty(shape, dtype="float64") - ind[field] = 0 - # Now we read. - for field_r, vals in self._read_particle_fields(dobj, ptf): - # Note that we now need to check the mappings - for field_f in field_maps[field_r]: - my_ind = ind[field_f] - rv[field_f][my_ind : my_ind + vals.shape[0], ...] = vals - ind[field_f] += vals.shape[0] - # Now we need to truncate all our fields, since we allow for - # over-estimating. - for field_f in ind: - rv[field_f] = rv[field_f][: ind[field_f]] - return rv + # This will be refactored. + _read_particle_selection = IOHandlerGadgetFOFHaloHDF5._read_particle_selection class IOHandlerYTHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog): From e3eac8ecf79349df060211afe9565dd24f752d9f Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 6 Aug 2020 12:06:38 +0100 Subject: [PATCH 306/653] Just pass if _setup_filenames has already been called. --- yt/geometry/particle_geometry_handler.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index ddae7423a4e..a4ab1a09f2e 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -47,9 +47,7 @@ def convert(self, unit): def _setup_filenames(self): if hasattr(self, "data_files"): - raise RuntimeError( - "_setup_filenames has already been called. Something is wrong." 
- ) + pass template = self.dataset.filename_template ndoms = self.dataset.file_count From 392fa0fd83aa691703175e9d6c3f35af61328ed8 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 6 Aug 2020 13:20:24 +0100 Subject: [PATCH 307/653] Add required class methods. --- yt/frontends/gadget_fof/data_structures.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 8203e55a3a3..b147a0de28b 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -490,6 +490,12 @@ def __repr__(self): def _setup_classes(self): self.objects = [] + def _set_code_units_attributes(self): + pass + + def _is_valid(self, *args, **kwargs): + return False + class GadgetFOFHaloContainer(YTSelectionContainer): """ From ad898e8e3954bc348daaa449d5ed73db778785e9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 6 Aug 2020 15:06:24 +0200 Subject: [PATCH 308/653] Remove mylog formatting strings --- .../construction_data_containers.py | 16 +++--- yt/data_objects/selection_data_containers.py | 2 +- yt/data_objects/static_output.py | 3 +- yt/data_objects/time_series.py | 2 +- yt/fields/local_fields.py | 3 +- yt/fields/xray_emission_fields.py | 9 +-- yt/utilities/linear_interpolators.py | 12 ++-- yt/utilities/load_sample.py | 4 +- yt/utilities/orientation.py | 4 +- yt/utilities/performance_counters.py | 2 +- yt/utilities/sdf.py | 56 ++++++++++--------- yt/visualization/fits_image.py | 4 +- yt/visualization/fixed_resolution.py | 18 ++++-- yt/visualization/line_plot.py | 2 +- yt/visualization/plot_window.py | 16 +++--- 15 files changed, 79 insertions(+), 74 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index e09e3aff35c..87996ef9d69 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -505,7 +505,7 @@ def deserialize(self, fields): deserialized_successfully = self._mrep.restore(store_file, self.ds) if deserialized_successfully: - mylog.info("Using previous projection data from %s" % store_file) + mylog.info("Using previous projection data from %s", store_file) for field, field_data in self._mrep.field_data.items(): self[field] = field_data if not deserialized_successfully: @@ -1539,7 +1539,7 @@ def get_data(self, fields=None, sample_type="face", no_ghost=False): fields = fields[0] # Now we have a "fields" value that is either a string or None if fields is not None: - mylog.info("Extracting (sampling: %s)" % (fields,)) + mylog.info("Extracting (sampling: %s)", fields) verts = [] samples = [] for _io_chunk in parallel_objects(self.data_source.chunks([], "io")): @@ -2544,19 +2544,19 @@ def _upload_to_sketchfab(self, data, files): try: r = requests.post(SKETCHFAB_API_URL, data=data, files=files, verify=False) except requests.exceptions.RequestException as e: - mylog.error("An error occured: {}".format(e)) + mylog.error("An error occured: %s", e) return result = r.json() if r.status_code != requests.codes.created: - mylog.error("Upload to SketchFab failed with error: {}".format(result)) + mylog.error("Upload to SketchFab failed with error: %s", result) return model_uid = result["uid"] model_url = SKETCHFAB_MODEL_URL + model_uid if model_uid: - mylog.info("Model uploaded to: {}".format(model_url)) + mylog.info("Model uploaded to: %s", model_url) else: mylog.error("Problem uploading.") @@ -2657,7 +2657,7 @@ def _generate_tree(self, fname=None): 
self._octree = None return - mylog.info("Allocating Octree for %s particles" % positions.shape[0]) + mylog.info("Allocating Octree for %s particles", positions.shape[0]) self.loaded = False self._octree = CyOctree( positions.astype("float64", copy=False), @@ -2670,7 +2670,7 @@ def _generate_tree(self, fname=None): ) if fname is not None: - mylog.info("Saving octree to file %s" % os.path.basename(fname)) + mylog.info("Saving octree to file %s", os.path.basename(fname)) self._octree.save(fname) @property @@ -2697,7 +2697,7 @@ def tree(self): self._generate_tree(fname) else: self.loaded = True - mylog.info("Loading octree from %s" % os.path.basename(fname)) + mylog.info("Loading octree from %s", os.path.basename(fname)) self._octree = CyOctree() self._octree.load(fname) if self._octree.data_version != self.ds._file_hash: diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index 377bc27888a..d7b8760665f 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -241,7 +241,7 @@ def __init__( ).any(): mylog.warn( "Ray start or end is outside the domain. " - + "Returned data will only be for the ray section inside the domain." + "Returned data will only be for the ray section inside the domain." ) self.vec = self.end_point - self.start_point self._set_center(self.start_point) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 0ad62cd35c4..cddac19ef77 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1378,8 +1378,7 @@ def add_field(self, name, function=None, sampling_type=None, **kwargs): # Handle the case where the field has already been added. if not override and name in self.field_info: mylog.error( - "Field %s already exists. To override use " + "force_override=True.", - name, + "Field %s already exists. To override use 'force_override=True'.", name, ) if kwargs.setdefault("particle_type", False): if sampling_type is not None and sampling_type != "particle": diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 1cd50b8b63d..64c8e6ff122 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -616,7 +616,7 @@ def print_key_parameters(self): self._print_attr(a) for a in self.key_parameters: self._print_attr(a) - mylog.info("Total datasets: %d." % len(self.all_outputs)) + mylog.info("Total datasets: %d.", len(self.all_outputs)) def _print_attr(self, a): """ diff --git a/yt/fields/local_fields.py b/yt/fields/local_fields.py index a3ca48871c5..88d41785f1e 100644 --- a/yt/fields/local_fields.py +++ b/yt/fields/local_fields.py @@ -17,8 +17,7 @@ def add_field(self, name, function=None, sampling_type=None, **kwargs): # Handle the case where the field has already been added. if not override and name in self: mylog.error( - "Field %s already exists. To override use " + "force_override=True.", - name, + "Field %s already exists. To override use 'force_override=True'.", name, ) if kwargs.setdefault("particle_type", False): if sampling_type is not None and sampling_type != "particle": diff --git a/yt/fields/xray_emission_fields.py b/yt/fields/xray_emission_fields.py index cb9aea81030..9fb3ecb4a23 100644 --- a/yt/fields/xray_emission_fields.py +++ b/yt/fields/xray_emission_fields.py @@ -26,9 +26,9 @@ def _get_data_file(table_type, data_dir=None): data_dir = supp_data_dir if os.path.exists(supp_data_dir) else "." 
data_path = os.path.join(data_dir, data_file) if not os.path.exists(data_path): - msg = ( - "Failed to find emissivity data file %s! " % data_file - + "Please download from %s!" % data_url + msg = "Failed to find emissivity data file %s! Please download from %s" % ( + data_file, + data_url, ) mylog.error(msg) raise IOError(msg) @@ -373,6 +373,7 @@ def _photon_intensity_field(field, data): fields += [ei_name, i_name] - [mylog.info("Adding ('%s','%s') field." % field) for field in fields] + for field in fields: + mylog.info("Adding ('%s','%s') field.", field[0], field[1]) return fields diff --git a/yt/utilities/linear_interpolators.py b/yt/utilities/linear_interpolators.py index bdd4502cfbd..1b69e1edfd4 100644 --- a/yt/utilities/linear_interpolators.py +++ b/yt/utilities/linear_interpolators.py @@ -52,8 +52,8 @@ def __call__(self, data_object): if np.any((x_i == -1) | (x_i == len(self.x_bins) - 1)): if not self.truncate: mylog.error( - "Sorry, but your values are outside" - + " the table! Dunno what to do, so dying." + "Sorry, but your values are outside " + "the table! Dunno what to do, so dying." ) mylog.error("Error was in: %s", data_object) raise ValueError @@ -128,8 +128,8 @@ def __call__(self, data_object): ): if not self.truncate: mylog.error( - "Sorry, but your values are outside" - + " the table! Dunno what to do, so dying." + "Sorry, but your values are outside " + "the table! Dunno what to do, so dying." ) mylog.error("Error was in: %s", data_object) raise ValueError @@ -217,8 +217,8 @@ def __call__(self, data_object): ): if not self.truncate: mylog.error( - "Sorry, but your values are outside" - + " the table! Dunno what to do, so dying." + "Sorry, but your values are outside " + "the table! Dunno what to do, so dying." ) mylog.error("Error was in: %s", data_object) raise ValueError diff --git a/yt/utilities/load_sample.py b/yt/utilities/load_sample.py index 9a3be8060e9..6f2f7c856dc 100644 --- a/yt/utilities/load_sample.py +++ b/yt/utilities/load_sample.py @@ -110,9 +110,7 @@ def _validate_sampledata_name(name): """ if not isinstance(name, str): - mylog.error( - "The argument {} passed to ".format(name) + "load_sample() is not a string." 
- ) + mylog.error("The argument %s passed to load_sample() is not a string.", name) # now get the extension if it exists base, ext = os.path.splitext(name) diff --git a/yt/utilities/orientation.py b/yt/utilities/orientation.py index 60a6b1bff25..00b101adb7c 100644 --- a/yt/utilities/orientation.py +++ b/yt/utilities/orientation.py @@ -62,9 +62,7 @@ def _setup_normalized_vectors(self, normal_vector, north_vector): normal_vector, north_vector = _validate_unit_vectors( normal_vector, north_vector ) - mylog.debug( - "Setting normalized vectors" + str(normal_vector) + str(north_vector) - ) + mylog.debug("Setting normalized vectors %s %s", normal_vector, north_vector) # Now we set up our various vectors normal_vector /= np.sqrt(np.dot(normal_vector, normal_vector)) if north_vector is None: diff --git a/yt/utilities/performance_counters.py b/yt/utilities/performance_counters.py index 60f90127e59..392cb00b5bf 100644 --- a/yt/utilities/performance_counters.py +++ b/yt/utilities/performance_counters.py @@ -87,7 +87,7 @@ def print_stats(self): i, self.counters[i], ) - mylog.info("\n" + line) + mylog.info("\n%s", line) def exit(self): if self._on: diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index 521ba98c96f..4d0b225cc47 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -247,7 +247,7 @@ def set_offset(self, offset): def build_memmap(self): assert self.size != -1 mylog.info( - "Building memmap with offset: %i and size %i" % (self._offset, self.size) + "Building memmap with offset: %i and size %i", self._offset, self.size ) self.handle = self.HTTPArray( self.filename, dtype=self.dtype, shape=self.size, offset=self._offset @@ -691,9 +691,7 @@ def set_bounds(self): f2 = 1 << int(np.log2(ic_Nmesh - 1) + 1) if f2 != ic_Nmesh: expand_root = 1.0 * f2 / ic_Nmesh - 1.0 - mylog.debug( - "Expanding: %s, %s, %s" % (f2, ic_Nmesh, expand_root) - ) + mylog.debug("Expanding: %s, %s, %s", f2, ic_Nmesh, expand_root) rmin *= 1.0 + expand_root rmax *= 1.0 + expand_root @@ -705,10 +703,12 @@ def set_bounds(self): ) / 2 self.domain_active_dims = self.domain_dims - 2 * self.domain_buffer - mylog.debug("MIDX rmin: %s, rmax: %s" % (self.rmin, self.rmax)) + mylog.debug("MIDX rmin: %s, rmax: %s", self.rmin, self.rmax) mylog.debug( - "MIDX: domain_width: %s, domain_dims: %s, domain_active_dims: %s " - % (self.domain_width, self.domain_dims, self.domain_active_dims) + "MIDX: domain_width: %s, domain_dims: %s, domain_active_dims: %s ", + self.domain_width, + self.domain_dims, + self.domain_active_dims, ) def spread_bits(self, ival, level=None): @@ -802,7 +802,7 @@ def get_ibbox(self, ileft, iright): # print('Getting data from ileft to iright:', ileft, iright) ix, iy, iz = (iright - ileft) * 1j - mylog.debug("MIDX IBBOX: %s %s %s %s %s" % (ileft, iright, ix, iy, iz)) + mylog.debug("MIDX IBBOX: %s %s %s %s %s", ileft, iright, ix, iy, iz) # plus 1 that is sliced, plus a bit since mgrid is not inclusive Z, Y, X = np.mgrid[ @@ -925,7 +925,7 @@ def get_previous_nonzero_chunk(self, key, stop=None): def iter_data(self, inds, fields): num_inds = len(inds) num_reads = 0 - mylog.debug("MIDX Reading %i chunks" % num_inds) + mylog.debug("MIDX Reading %i chunks", num_inds) i = 0 while i < num_inds: ind = inds[i] @@ -950,8 +950,11 @@ def iter_data(self, inds, fields): chunk = slice(base, base + length) mylog.debug( - "Reading chunk %i of length %i after catting %i starting at %i" - % (i, length, combined, ind) + "Reading chunk %i of length %i after catting %i starting at %i", + i, + length, + combined, + ind, ) num_reads += 
1 if length > 0: @@ -959,7 +962,7 @@ def iter_data(self, inds, fields): yield data del data i += 1 - mylog.debug("Read %i chunks, batched into %i reads" % (num_inds, num_reads)) + mylog.debug("Read %i chunks, batched into %i reads", num_inds, num_reads) def filter_particles(self, myiter, myfilter): for data in myiter: @@ -994,8 +997,7 @@ def filter_bbox(self, left, right, myiter): # print('Mask shape, sum:', mask.shape, mask.sum()) mylog.debug( - "Filtering particles, returning %i out of %i" - % (mask.sum(), mask.shape[0]) + "Filtering particles, returning %i out of %i", mask.sum(), mask.shape[0] ) if not np.any(mask): @@ -1034,8 +1036,7 @@ def filter_sphere(self, center, radius, myiter): mask = ((pos - center) ** 2).sum(axis=1) ** 0.5 < radius mylog.debug( - "Filtering particles, returning %i out of %i" - % (mask.sum(), mask.shape[0]) + "Filtering particles, returning %i out of %i", mask.sum(), mask.shape[0] ) if not np.any(mask): @@ -1060,7 +1061,7 @@ def iter_filtered_bbox_fields(self, left, right, data, pos_fields, fields): if pos_fields is None: pos_fields = "x", "y", "z" xf, yf, zf = pos_fields - mylog.debug("Using position fields: %s" % pos_fields) + mylog.debug("Using position fields: %s", pos_fields) # I'm sorry. pos = ( @@ -1079,14 +1080,17 @@ def iter_filtered_bbox_fields(self, left, right, data, pos_fields, fields): _shift_periodic(pos, left, right, DW) mylog.debug( - "Periodic filtering, %s %s %s %s" - % (left, right, pos.min(axis=0), pos.max(axis=0)) + "Periodic filtering, %s %s %s %s", + left, + right, + pos.min(axis=0), + pos.max(axis=0), ) # Now get all particles that are within the bbox mask = np.all(pos >= left, axis=1) * np.all(pos < right, axis=1) mylog.debug( - "Filtering particles, returning %i out of %i" % (mask.sum(), mask.shape[0]) + "Filtering particles, returning %i out of %i", mask.sum(), mask.shape[0] ) if np.any(mask): @@ -1105,7 +1109,7 @@ def iter_bbox_data(self, left, right, fields): and a right. """ _ensure_xyz_fields(fields) - mylog.debug("MIDX Loading region from %s to %s" % (left, right)) + mylog.debug("MIDX Loading region from %s to %s", left, right) inds = self.get_bbox(left, right) # Need to put left/right in float32 to avoid fp roundoff errors # in the bbox later. @@ -1126,14 +1130,14 @@ def iter_sphere_data(self, center, radius, fields): a radius. 
""" _ensure_xyz_fields(fields) - mylog.debug("MIDX Loading spherical region %s to %s" % (center, radius)) + mylog.debug("MIDX Loading spherical region %s to %s", center, radius) inds = self.get_bbox(center - radius, center + radius) for dd in self.filter_sphere(center, radius, self.iter_data(inds, fields)): yield dd def iter_ibbox_data(self, left, right, fields): - mylog.debug("MIDX Loading region from %s to %s" % (left, right)) + mylog.debug("MIDX Loading region from %s to %s", left, right) inds = self.get_ibbox(left, right) return self.iter_data(inds, fields) @@ -1157,7 +1161,7 @@ def get_contiguous_chunk(self, left_key, right_key, fields): length = rbase + rlen - lbase if length > 0: mylog.debug( - "Getting contiguous chunk of size %i starting at %i" % (length, lbase) + "Getting contiguous chunk of size %i starting at %i", length, lbase ) return self.get_data(slice(lbase, lbase + length), fields) @@ -1170,7 +1174,7 @@ def get_key_data(self, key, fields): length = self.indexdata["len"][key] - base if length > 0: mylog.debug( - "Getting contiguous chunk of size %i starting at %i" % (length, base) + "Getting contiguous chunk of size %i starting at %i", length, base ) return self.get_data(slice(base, base + length), fields) @@ -1247,7 +1251,7 @@ def get_cell_data(self, level, cell_iarr, fields): """ cell_iarr = np.array(cell_iarr, dtype="int64") lk, rk = self.get_key_bounds(level, cell_iarr) - mylog.debug("Reading contiguous chunk from %i to %i" % (lk, rk)) + mylog.debug("Reading contiguous chunk from %i to %i", lk, rk) return self.get_contiguous_chunk(lk, rk, fields) def get_cell_bbox(self, level, cell_iarr): diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index 31aa539762e..2e7095d6654 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -261,7 +261,7 @@ def __init__( self.field_units[name] = str(funits) else: self.field_units[name] = "dimensionless" - mylog.info("Making a FITS image of field %s" % name) + mylog.info("Making a FITS image of field %s", name) if isinstance(this_img, ImageArray): if i == 0: self.shape = this_img.shape[::-1] @@ -845,7 +845,7 @@ def construct_image(ds, axis, data_source, center, image_res, width, length_unit unit = ds.get_smallest_appropriate_unit(width[0]) mylog.info( "Making an image of the entire domain, " - + "so setting the center to the domain center." + "so setting the center to the domain center." 
) else: width = ds.coordinates.sanitize_width(axis, width, None) diff --git a/yt/visualization/fixed_resolution.py b/yt/visualization/fixed_resolution.py index 49087d18323..05e3f7378ff 100644 --- a/yt/visualization/fixed_resolution.py +++ b/yt/visualization/fixed_resolution.py @@ -132,8 +132,10 @@ def __getitem__(self, item): if item in self.data: return self.data[item] mylog.info( - "Making a fixed resolution buffer of (%s) %d by %d" - % (item, self.buff_size[0], self.buff_size[1]) + "Making a fixed resolution buffer of (%s) %d by %d", + item, + self.buff_size[0], + self.buff_size[1], ) bounds = [] for b in self.bounds: @@ -626,8 +628,10 @@ def __getitem__(self, item): if item in self.data: return self.data[item] mylog.info( - "Making a fixed resolution buffer of (%s) %d by %d" - % (item, self.buff_size[0], self.buff_size[1]) + "Making a fixed resolution buffer of (%s) %d by %d", + item, + self.buff_size[0], + self.buff_size[1], ) dd = self.data_source width = self.ds.arr( @@ -685,8 +689,10 @@ def __getitem__(self, item): return self.data[item] mylog.info( - "Splatting (%s) onto a %d by %d mesh" - % (item, self.buff_size[0], self.buff_size[1]) + "Splatting (%s) onto a %d by %d mesh", + item, + self.buff_size[0], + self.buff_size[1], ) bounds = [] diff --git a/yt/visualization/line_plot.py b/yt/visualization/line_plot.py index 47d50ffacba..e57e7b8a3a2 100644 --- a/yt/visualization/line_plot.py +++ b/yt/visualization/line_plot.py @@ -63,7 +63,7 @@ def __setitem__(self, item, val): def __getitem__(self, item): if item in self.data: return self.data[item] - mylog.info("Making a line buffer with %d points of %s" % (self.npoints, item)) + mylog.info("Making a line buffer with %d points of %s", self.npoints, item) self.points, self.data[item] = self.ds.coordinates.pixelize_line( item, self.start_point, self.end_point, self.npoints ) diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 44976a3c2c8..4620665d63b 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -595,10 +595,10 @@ def _set_window(self, bounds): self.ylim = tuple(bounds[2:4]) if len(bounds) == 6: self.zlim = tuple(bounds[4:6]) - mylog.info("xlim = %f %f" % self.xlim) - mylog.info("ylim = %f %f" % self.ylim) + mylog.info("xlim = %f %f", self.xlim[0], self.xlim[1]) + mylog.info("ylim = %f %f", self.ylim[0], self.ylim[1]) if hasattr(self, "zlim"): - mylog.info("zlim = %f %f" % self.zlim) + mylog.info("zlim = %f %f", self.zlim[0], self.zlim[1]) @invalidate_data def set_width(self, width, unit=None): @@ -1156,9 +1156,9 @@ def _setup_plots(self): else: mylog.error( - "Unable to draw cbar minorticks for field {} with transform {} ".format( - f, self._field_transform[f] - ) + "Unable to draw cbar minorticks for field %s with transform %s ", + f, + self._field_transform[f], ) self._cbar_minorticks[f] = False @@ -1513,7 +1513,7 @@ def __init__( "geographic", "internal_geographic", ): - mylog.info("Setting origin='native' for %s geometry." % ds.geometry) + mylog.info("Setting origin='native' for %s geometry.", ds.geometry) origin = "native" if isinstance(ds, YTSpatialPlotDataset): @@ -1726,7 +1726,7 @@ def __init__( "geographic", "internal_geographic", ): - mylog.info("Setting origin='native' for %s geometry." % ds.geometry) + mylog.info("Setting origin='native' for %s geometry.", ds.geometry) origin = "native" # proj_style is deprecated, but if someone specifies then it trumps # method. 
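The patch above ("Remove mylog formatting strings") and the second pass that follows convert eager %-interpolation inside mylog calls into logging's deferred argument passing. A minimal standalone sketch of the difference, using only the standard logging module (the logger name and variables here are illustrative, not taken from the patches)::

    import logging

    logging.basicConfig(level=logging.WARNING)
    mylog = logging.getLogger("yt")

    item, nx, ny = "density", 800, 800

    # Eager: the string is built immediately, even though WARNING-level
    # filtering means this record is never emitted.
    mylog.info("Making a buffer of (%s) %d by %d" % (item, nx, ny))

    # Deferred: the arguments are stored on the log record and interpolation
    # only happens if a handler actually emits the message.
    mylog.info("Making a buffer of (%s) %d by %d", item, nx, ny)
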
From ef51ad5199692afcf1a8ab491aa115c00c423113 Mon Sep 17 00:00:00 2001 From: convert-repo Date: Thu, 6 Aug 2020 15:55:09 +0200 Subject: [PATCH 309/653] Second pass ! --- yt/data_objects/level_sets/clump_handling.py | 23 ++++++++--------- yt/frontends/amrvac/data_structures.py | 14 +++++------ yt/frontends/amrvac/fields.py | 8 +++--- yt/frontends/art/data_structures.py | 8 +++--- yt/frontends/art/io.py | 2 +- yt/frontends/artio/data_structures.py | 2 +- yt/frontends/athena/data_structures.py | 19 ++++++++------ yt/frontends/enzo/simulation_handling.py | 2 +- yt/frontends/fits/data_structures.py | 25 +++++++++---------- yt/frontends/fits/misc.py | 6 ++--- yt/frontends/flash/data_structures.py | 12 +++++---- yt/frontends/gadget/simulation_handling.py | 3 +-- yt/frontends/gadget_fof/data_structures.py | 12 ++++----- yt/frontends/open_pmd/data_structures.py | 12 ++++----- yt/frontends/open_pmd/fields.py | 10 ++++---- yt/frontends/owls/fields.py | 2 +- yt/frontends/ramses/data_structures.py | 11 +++----- yt/frontends/ramses/definitions.py | 2 +- yt/frontends/ramses/field_handlers.py | 6 ++--- yt/frontends/ramses/io.py | 4 +-- yt/frontends/sph/data_structures.py | 4 +-- yt/frontends/stream/data_structures.py | 4 +-- yt/frontends/swift/data_structures.py | 6 ++--- yt/frontends/ytdata/utilities.py | 2 +- yt/funcs.py | 8 ------ .../coordinates/cartesian_coordinates.py | 6 +++-- yt/startup_tasks.py | 2 +- yt/utilities/amr_kdtree/amr_kdtools.py | 6 ++--- yt/utilities/amr_kdtree/amr_kdtree.py | 4 +-- .../parallel_analysis_interface.py | 4 +-- .../interactive_vr_helpers.py | 2 +- .../volume_rendering/off_axis_projection.py | 2 +- .../volume_rendering/old_camera.py | 8 +++--- .../volume_rendering/render_source.py | 2 +- .../transfer_function_helper.py | 4 +-- .../volume_rendering/transfer_functions.py | 5 ++-- .../volume_rendering/volume_rendering.py | 2 +- 37 files changed, 122 insertions(+), 132 deletions(-) diff --git a/yt/data_objects/level_sets/clump_handling.py b/yt/data_objects/level_sets/clump_handling.py index 994cdd4ac4a..b5f5f927258 100644 --- a/yt/data_objects/level_sets/clump_handling.py +++ b/yt/data_objects/level_sets/clump_handling.py @@ -411,9 +411,7 @@ def __getitem__(self, request): def find_clumps(clump, min_val, max_val, d_clump): - mylog.info( - "Finding clumps: min: %e, max: %e, step: %f" % (min_val, max_val, d_clump) - ) + mylog.info("Finding clumps: min: %e, max: %e, step: %f", min_val, max_val, d_clump) if min_val >= max_val: return clump.find_children(min_val, max_val=max_val) @@ -423,7 +421,7 @@ def find_clumps(clump, min_val, max_val, d_clump): elif len(clump.children) > 0: these_children = [] - mylog.info("Investigating %d children." % len(clump.children)) + mylog.info("Investigating %d children.", len(clump.children)) for child in clump.children: find_clumps(child, min_val * d_clump, max_val, d_clump) if len(child.children) > 0: @@ -432,19 +430,19 @@ def find_clumps(clump, min_val, max_val, d_clump): these_children.append(child) else: mylog.info( - ("Eliminating invalid, childless clump with " + "%d cells.") - % len(child.data["ones"]) + "Eliminating invalid, childless clump with %d cells.", + len(child.data["ones"]), ) if len(these_children) > 1: mylog.info( - "%d of %d children survived." 
- % (len(these_children), len(clump.children)) + "%d of %d children survived.", len(these_children), len(clump.children) ) clump.children = these_children elif len(these_children) == 1: mylog.info( - ("%d of %d children survived, linking its " + "children to parent.") - % (len(these_children), len(clump.children)) + "%d of %d children survived, linking its children to parent.", + len(these_children), + len(clump.children), ) clump.children = these_children[0].children for child in clump.children: @@ -452,8 +450,9 @@ def find_clumps(clump, min_val, max_val, d_clump): child.data.parent = clump.data else: mylog.info( - "%d of %d children survived, erasing children." - % (len(these_children), len(clump.children)) + "%d of %d children survived, erasing children.", + len(these_children), + len(clump.children), ) clump.children = [] diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index a390b5aabfd..30fde503bde 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -196,9 +196,9 @@ def __init__( if namelist_gamma is not None and self.gamma != namelist_gamma: mylog.error( - "Inconsistent values in gamma: datfile {}, parfiles {}".format( - self.gamma, namelist_gamma - ) + "Inconsistent values in gamma: datfile %s, parfiles %s", + self.gamma, + namelist_gamma, ) if "method_list" in namelist: @@ -312,8 +312,8 @@ def _parse_parameter_file(self): elif self.parameters["datfile_version"] > 4: # py38: walrus here mylog.error( - "No 'geometry' flag found in datfile with version %d >4." - % self.parameters["datfile_version"] + "No 'geometry' flag found in datfile with version %d >4.", + self.parameters["datfile_version"], ) if self._geometry_override is not None: @@ -329,8 +329,8 @@ def _parse_parameter_file(self): ) except ValueError: mylog.error( - "Unable to parse geometry_override '%s' (will be ignored)." - % self._geometry_override + "Unable to parse geometry_override '%s' (will be ignored).", + self._geometry_override, ) if self.geometry is None: diff --git a/yt/frontends/amrvac/fields.py b/yt/frontends/amrvac/fields.py index 13b60eb4ed3..04c235ab4e9 100644 --- a/yt/frontends/amrvac/fields.py +++ b/yt/frontends/amrvac/fields.py @@ -41,8 +41,8 @@ def _velocity(field, data, idir, prefix=None): mask1 = rho == 0 if mask1.any(): mylog.info( - "zeros found in %sdensity, patching them to compute corresponding velocity field." - % prefix + "zeros found in %sdensity, patching them to compute corresponding velocity field.", + prefix, ) mask2 = moment == 0 if not ((mask1 & mask2) == mask1).all(): @@ -132,8 +132,8 @@ def _setup_dust_fields(self): if idust > MAXN_DUST_SPECIES: mylog.error( "Only the first %d dust species are currently read by yt. " - "If you read this, please consider issuing a ticket. " - % MAXN_DUST_SPECIES + "If you read this, please consider issuing a ticket. 
", + MAXN_DUST_SPECIES, ) break self._setup_velocity_fields(idust) diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index bf6e34457d0..b2bb623a5c2 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -267,7 +267,7 @@ def _parse_parameter_file(self): self.root_nocts = self.domain_dimensions.prod() // 8 self.root_ncells = self.root_nocts * 8 mylog.debug( - "Estimating %i cells on a root grid side," + "%i root octs", + "Estimating %i cells on a root grid side, %i root octs", est, self.root_nocts, ) @@ -323,7 +323,8 @@ def _parse_parameter_file(self): ls_nonzero = np.append(lspecies[0], ls_nonzero) self.star_type = len(ls_nonzero) mylog.info("Discovered %i species of particles", len(ls_nonzero)) - mylog.info("Particle populations: " + "%9i " * len(ls_nonzero), *ls_nonzero) + info_str = "Particle populations: " + "%9i " * len(ls_nonzero) + mylog.info(info_str, *ls_nonzero) self._particle_type_counts = dict(zip(self.particle_types_raw, ls_nonzero)) for k, v in particle_header_vals.items(): if k in self.parameters.keys(): @@ -614,7 +615,8 @@ def _parse_parameter_file(self): ls_nonzero = np.append(lspecies[0], ls_nonzero) self.star_type = len(ls_nonzero) mylog.info("Discovered %i species of particles", len(ls_nonzero)) - mylog.info("Particle populations: " + "%9i " * len(ls_nonzero), *ls_nonzero) + info_str = "Particle populations: " + "%9i " * len(ls_nonzero) + mylog.info(info_str, *ls_nonzero) for k, v in particle_header_vals.items(): if k in self.parameters.keys(): if not self.parameters[k] == v: diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index c66cf3dc0b2..e9ac776efd0 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -300,7 +300,7 @@ def interpolate_ages( if current_time: tdiff = YTQuantity(b2t(t_stars), "Gyr") - current_time.in_units("Gyr") if np.abs(tdiff) > 1e-4: - mylog.info("Timestamp mismatch in star " + "particle header: %s", tdiff) + mylog.info("Timestamp mismatch in star particle header: %s", tdiff) mylog.info("Interpolating ages") interp_tb, interp_ages = b2t(data) interp_tb = YTArray(interp_tb, "Gyr") diff --git a/yt/frontends/artio/data_structures.py b/yt/frontends/artio/data_structures.py index faea486ecd2..5a501a71ca7 100644 --- a/yt/frontends/artio/data_structures.py +++ b/yt/frontends/artio/data_structures.py @@ -289,7 +289,7 @@ def _identify_base_chunk(self, dobj): ) dobj._chunk_info = ci if len(list_sfc_ranges) > 1: - mylog.info("Created %d chunks for ARTIO" % len(list_sfc_ranges)) + mylog.info("Created %d chunks for ARTIO", len(list_sfc_ranges)) dobj._current_chunk = list(self._chunk_all(dobj))[0] def _data_size(self, dobj, dobjs): diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index 4c0fb90611c..a50ec2fce73 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -158,8 +158,9 @@ def _detect_output_fields(self): grid_dims[grid_dims == 0] = 1 if np.prod(grid_dims) != grid_ncells: mylog.error( - "product of dimensions %i not equal to number of cells %i" - % (np.prod(grid_dims), grid_ncells) + "product of dimensions %i not equal to number of cells %i", + np.prod(grid_dims), + grid_ncells, ) raise TypeError break @@ -222,8 +223,9 @@ def _parse_index(self): grid["dimensions"][grid["dimensions"] == 0] = 1 if np.prod(grid["dimensions"]) != grid["ncells"]: mylog.error( - "product of dimensions %i not equal to number of cells %i" - % (np.prod(grid["dimensions"]), grid["ncells"]) + 
"product of dimensions %i not equal to number of cells %i", + np.prod(grid["dimensions"]), + grid["ncells"], ) raise TypeError @@ -298,8 +300,9 @@ def _parse_index(self): gridread["dimensions"][gridread["dimensions"] == 0] = 1 if np.prod(gridread["dimensions"]) != gridread["ncells"]: mylog.error( - "product of dimensions %i not equal to number of cells %i" - % (np.prod(gridread["dimensions"]), gridread["ncells"]) + "product of dimensions %i not equal to number of cells %i", + np.prod(gridread["dimensions"]), + gridread["ncells"], ) raise TypeError gdims[j, 0] = gridread["dimensions"][0] @@ -559,8 +562,8 @@ def _parse_parameter_file(self): self.domain_left_edge = grid["left_edge"] mylog.info( - "Temporarily setting domain_right_edge = -domain_left_edge." - + " This will be corrected automatically if it is not the case." + "Temporarily setting domain_right_edge = -domain_left_edge. " + "This will be corrected automatically if it is not the case." ) self.domain_right_edge = -self.domain_left_edge self.domain_width = self.domain_right_edge - self.domain_left_edge diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 73418189bf2..f4807fbf6f7 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -513,7 +513,7 @@ def _get_all_outputs(self, find_outputs=False): ) mylog.info( " Unable to calculate datasets. " - + "Attempting to search in the current directory" + "Attempting to search in the current directory" ) self._find_outputs() diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index 09b5be30f46..1d814a60f14 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -169,18 +169,17 @@ def _detect_output_fields(self): self._scale_map[fname][1] = hdu.header["bscale"] self.field_list.append(("fits", fname)) self.dataset.field_units[fname] = units - mylog.info("Adding field %s to the list of fields." % (fname)) + mylog.info("Adding field %s to the list of fields.", fname) if units == "dimensionless": mylog.warning( - "Could not determine dimensions for field %s, " - % (fname) - + "setting to dimensionless." + "Could not determine dimensions for field %s, setting to dimensionless.", + fname, ) else: mylog.warning( - "Image block %s does not have " % (hdu.name.lower()) - + "the same dimensions as the primary and will not be " - + "available as a field." + "Image block %s does not have the same dimensions as the primary and will not be " + "available as a field.", + hdu.name.lower(), ) def _count_grids(self): @@ -404,7 +403,7 @@ def _set_code_unit_attributes(self): if len(set(file_units)) == 1: length_factor = self.wcs.wcs.cdelt[0] length_unit = str(file_units[0]) - mylog.info("Found length units of %s." % length_unit) + mylog.info("Found length units of %s.", length_unit) else: self.no_cgs_equiv_length = True mylog.warning("No length conversion provided. Assuming 1 = 1 cm.") @@ -590,13 +589,13 @@ def _set_code_unit_attributes(self): self.primary_header[short_unit], self.primary_header.comments[short_unit].strip("[]"), ) - mylog.info("Found %s units of %s." % (unit, u)) + mylog.info("Found %s units of %s.", unit, u) else: if unit == "length": # Falling back to old way of getting units for length # in old files u = self.quan(1.0, str(self.wcs.wcs.cunit[0])) - mylog.info("Found %s units of %s." 
% (unit, u)) + mylog.info("Found %s units of %s.", unit, u) else: # Give up otherwise u = self.quan(1.0, cgs) @@ -655,7 +654,7 @@ def _parse_parameter_file(self): self.geometry = "spectral_cube" log_str = "Detected these axes: " + "%s " * len(self.ctypes) - mylog.info(log_str % tuple([ctype for ctype in self.ctypes])) + mylog.info(log_str, *self.ctypes) self.lat_axis = np.zeros((end - 1), dtype="bool") for p in lat_prefixes: @@ -783,7 +782,7 @@ def _parse_parameter_file(self): max(self.domain_dimensions[[self.lon_axis, self.lat_axis]]) ) self.spectral_factor /= self.domain_dimensions[self.spec_axis] - mylog.info("Setting the spectral factor to %f" % (self.spectral_factor)) + mylog.info("Setting the spectral factor to %f", self.spectral_factor) Dz = ( self.domain_right_edge[self.spec_axis] - self.domain_left_edge[self.spec_axis] @@ -823,7 +822,7 @@ def _detect_output_fields(self): self.field_list = [] for k, v in ds.events_info.items(): fname = "event_" + k - mylog.info("Adding field %s to the list of fields." % (fname)) + mylog.info("Adding field %s to the list of fields.", fname) self.field_list.append(("io", fname)) if k in ["x", "y"]: field_unit = "code_length" diff --git a/yt/frontends/fits/misc.py b/yt/frontends/fits/misc.py index 19775cd0385..19f70a69a67 100644 --- a/yt/frontends/fits/misc.py +++ b/yt/frontends/fits/misc.py @@ -54,7 +54,7 @@ def setup_counts_fields(ds, ebounds, ftype="gas"): for (emin, emax) in ebounds: cfunc = _make_counts(emin, emax) fname = "counts_%s-%s" % (emin, emax) - mylog.info("Creating counts field %s." % fname) + mylog.info("Creating counts field %s.", fname) ds.add_field( (ftype, fname), sampling_type="cell", @@ -114,9 +114,7 @@ def create_spectral_slabs(filename, slab_centers, slab_width, **kwargs): slab_center = YTQuantity(v[0], v[1]) else: slab_center = v - mylog.info( - "Adding slab field %s at %g %s" % (k, slab_center.v, slab_center.units) - ) + mylog.info("Adding slab field %s at %g %s", k, slab_center.v, slab_center.units) slab_lo = (slab_center - 0.5 * slab_width).to_astropy() slab_hi = (slab_center + 0.5 * slab_width).to_astropy() subcube = cube.spectral_slab(slab_lo, slab_hi) diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 32b2ae9caba..955a5caf9a9 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -191,7 +191,7 @@ def __init__( ) self.particle_filename = filename.replace("plt_cnt", "part") mylog.info( - "Particle file found: %s" % self.particle_filename.split("/")[-1] + "Particle file found: %s", self.particle_filename.split("/")[-1] ) except IOError: self._particle_handle = self._handle @@ -312,8 +312,9 @@ def _parse_parameter_file(self): pval = val if vn in self.parameters and self.parameters[vn] != pval: mylog.info( - "{0} {1} overwrites a simulation " - "scalar of the same name".format(hn[:-1], vn) + "%s %s overwrites a simulation scalar of the same name", + hn[:-1], + vn, ) if hasattr(pval, "decode"): pval = pval.decode("ascii", "ignore") @@ -341,8 +342,9 @@ def _parse_parameter_file(self): pval = val if vn in self.parameters and self.parameters[vn] != pval: mylog.info( - "{0} {1} overwrites a simulation " - "scalar of the same name".format(hn[:-1], vn) + "%s %s overwrites a simulation scalar of the same name", + hn[:-1], + vn, ) if hasattr(pval, "decode"): pval = pval.decode("ascii", "ignore") diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 6ce53f2ddd7..83195670625 100644 --- 
a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -384,8 +384,7 @@ def _find_data_dir(self): data_dir = os.path.join(self.directory, self.parameters["OutputDir"]) if not os.path.exists(data_dir): mylog.info( - "OutputDir not found at %s, instead using %s." - % (data_dir, self.directory) + "OutputDir not found at %s, instead using %s.", data_dir, self.directory ) data_dir = self.directory self.data_dir = data_dir diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index dc6efe24325..1874a61d55e 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -660,13 +660,11 @@ def __init__(self, ptype, particle_identifier, ds=None): parent_subhalos = my_data["GroupNsubs"][0] mylog.debug( - "Subhalo %d is subgroup %s of %d in group %d." - % ( - self.particle_identifier, - self.subgroup_identifier, - parent_subhalos, - self.group_identifier, - ) + "Subhalo %d is subgroup %s of %d in group %d.", + self.particle_identifier, + self.subgroup_identifier, + parent_subhalos, + self.group_identifier, ) # ids of the sibling subhalos that come before this one diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index e8c3f5de420..b63ce96da4b 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -465,7 +465,7 @@ def __init__( if len(particles) > 1: # Only use on-disk particle names if there is more than one species self.particle_types = particles - mylog.debug("self.particle_types: {}".format(self.particle_types)) + mylog.debug("self.particle_types: %s", self.particle_types) self.particle_types_raw = self.particle_types self.particle_types = tuple(self.particle_types) except (KeyError, TypeError, AttributeError): @@ -486,7 +486,7 @@ def _set_paths(self, handle, path, iteration): encoding = handle.attrs["iterationEncoding"].decode() if "groupBased" in encoding: iterations = list(handle["/data"].keys()) - mylog.info("Found {} iterations in file".format(len(iterations))) + mylog.info("Found %s iterations in file", len(iterations)) elif "fileBased" in encoding: itformat = handle.attrs["iterationFormat"].decode().split("/")[-1] regex = "^" + itformat.replace("%T", "[0-9]+") + "$" @@ -498,12 +498,12 @@ def _set_paths(self, handle, path, iteration): for filename in listdir(path): if match(regex, filename): iterations.append(filename) - mylog.info("Found {} iterations in directory".format(len(iterations))) + mylog.info("Found %s iterations in directory", len(iterations)) if len(iterations) == 0: mylog.warning("No iterations found!") if "groupBased" in encoding and len(iterations) > 1: - mylog.warning("Only chose to load one iteration ({})".format(iteration)) + mylog.warning("Only chose to load one iteration (%s)", iteration) self.base_path = "/data/{}/".format(iteration) try: @@ -512,8 +512,8 @@ def _set_paths(self, handle, path, iteration): except (KeyError): if self.standard_version <= StrictVersion("1.1.0"): mylog.info( - "meshesPath not present in file." - " Assuming file contains no meshes and has a domain extent of 1m^3!" + "meshesPath not present in file. " + "Assuming file contains no meshes and has a domain extent of 1m^3!" 
) self.meshes_path = None else: diff --git a/yt/frontends/open_pmd/fields.py b/yt/frontends/open_pmd/fields.py index c693307fa67..1a7b69622e6 100644 --- a/yt/frontends/open_pmd/fields.py +++ b/yt/frontends/open_pmd/fields.py @@ -178,7 +178,7 @@ def __init__(self, ds, field_list): self._mag_fields.append(ytname) self.known_other_fields += ((ytname, (unit, aliases, None)),) for i in self.known_other_fields: - mylog.debug("open_pmd - known_other_fields - {}".format(i)) + mylog.debug("open_pmd - known_other_fields - %s", i) except (KeyError, TypeError, AttributeError): pass @@ -213,12 +213,12 @@ def __init__(self, ds, field_list): except (KeyError): if recname != "particlePatches": mylog.info( - "open_pmd - {}_{} does not seem to have unitDimension".format( - pname, recname - ) + "open_pmd - %s_%s does not seem to have unitDimension", + pname, + recname, ) for i in self.known_particle_fields: - mylog.debug("open_pmd - known_particle_fields - {}".format(i)) + mylog.debug("open_pmd - known_particle_fields - %s", i) except (KeyError, TypeError, AttributeError): pass diff --git a/yt/frontends/owls/fields.py b/yt/frontends/owls/fields.py index eda0034fddc..1556dae4ea1 100644 --- a/yt/frontends/owls/fields.py +++ b/yt/frontends/owls/fields.py @@ -343,7 +343,7 @@ def _get_owls_ion_data_dir(self): owls_ion_path = os.path.join(data_dir, "owls_ion_data") if not os.path.exists(owls_ion_path): - mylog.info(txt % (data_url, data_dir)) + mylog.info(txt, data_url, data_dir) fname = data_dir + "/" + data_file download_file(os.path.join(data_url, data_file), fname) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 30dbda5ee9e..e944c16c682 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -58,9 +58,7 @@ def __init__(self, ds, domain_id): field_handlers = [FH(self) for FH in get_field_handlers() if FH.any_exist(ds)] self.field_handlers = field_handlers for fh in field_handlers: - mylog.debug( - "Detected fluid type %s in domain_id=%s" % (fh.ftype, domain_id) - ) + mylog.debug("Detected fluid type %s in domain_id=%s", fh.ftype, domain_id) fh.detect_fields(ds) # self._add_ftype(fh.ftype) @@ -71,7 +69,7 @@ def __init__(self, ds, domain_id): self.particle_handlers = particle_handlers for ph in particle_handlers: mylog.debug( - "Detected particle type %s in domain_id=%s" % (ph.ptype, domain_id) + "Detected particle type %s in domain_id=%s", ph.ptype, domain_id ) ph.read_header() # self._add_ptype(ph.ptype) @@ -609,7 +607,7 @@ def closure(pfilter, data): ) for k in particle_families.keys(): - mylog.info("Adding particle_type: %s" % k) + mylog.info("Adding particle_type: %s", k) self.add_particle_filter("%s" % k) def __repr__(self): @@ -767,8 +765,7 @@ def read_namelist(self): nml = "An error occurred when reading the namelist: %s" % str(e) except (ValueError, StopIteration) as e: mylog.warn( - "Could not parse `namelist.txt` file as it was malformed: %s" - % str(e) + "Could not parse `namelist.txt` file as it was malformed: %s", e ) return diff --git a/yt/frontends/ramses/definitions.py b/yt/frontends/ramses/definitions.py index abc26b394cf..f3ff3cd2007 100644 --- a/yt/frontends/ramses/definitions.py +++ b/yt/frontends/ramses/definitions.py @@ -71,6 +71,6 @@ def ramses_header(hvals): val = ytcfg.getint("ramses-families", key, fallback=None) if val is not None: mylog.info( - "Changing family %s from %s to %s" % (key, particle_families[key], val) + "Changing family %s from %s to %s", key, particle_families[key], val ) 
particle_families[key] = val diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index a2bc835aee0..b20ad4f94c8 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -399,8 +399,8 @@ def detect_fields(cls, ds): "Metallicity", ] mylog.debug( - "No fields specified by user; automatically setting fields array to %s" - % str(fields) + "No fields specified by user; automatically setting fields array to %s", + fields, ) # Allow some wiggle room for users to add too many variables @@ -409,7 +409,7 @@ def detect_fields(cls, ds): fields.append("var" + str(len(fields))) count_extra += 1 if count_extra > 0: - mylog.debug("Detected %s extra fluid fields." % count_extra) + mylog.debug("Detected %s extra fluid fields.", count_extra) cls.field_list = [(cls.ftype, e) for e in fields] cls.set_detected_fields(ds, fields) diff --git a/yt/frontends/ramses/io.py b/yt/frontends/ramses/io.py index 60697612d4e..3e06a48273f 100644 --- a/yt/frontends/ramses/io.py +++ b/yt/frontends/ramses/io.py @@ -234,7 +234,7 @@ def _read_part_file_descriptor(fname): with open(fname, "r") as f: line = f.readline() tmp = VERSION_RE.match(line) - mylog.debug("Reading part file descriptor %s." % fname) + mylog.debug("Reading part file descriptor %s.", fname) if not tmp: raise YTParticleOutputFormatNotImplemented() @@ -296,7 +296,7 @@ def _read_fluid_file_descriptor(fname): with open(fname, "r") as f: line = f.readline() tmp = VERSION_RE.match(line) - mylog.debug("Reading fluid file descriptor %s." % fname) + mylog.debug("Reading fluid file descriptor %s.", fname) if not tmp: return [] diff --git a/yt/frontends/sph/data_structures.py b/yt/frontends/sph/data_structures.py index 78eddcdf1d5..ac292fbac05 100644 --- a/yt/frontends/sph/data_structures.py +++ b/yt/frontends/sph/data_structures.py @@ -93,7 +93,7 @@ def _generate_kdtree(self, fname): if fname is not None: if os.path.exists(fname): - mylog.info("Loading KDTree from %s" % os.path.basename(fname)) + mylog.info("Loading KDTree from %s", os.path.basename(fname)) kdtree = PyKDTree.from_file(fname) if kdtree.data_version != self.ds._file_hash: mylog.info("Detected hash mismatch, regenerating KDTree") @@ -110,7 +110,7 @@ def _generate_kdtree(self, fname): self._kdtree = None return positions = np.concatenate(positions) - mylog.info("Allocating KDTree for %s particles" % positions.shape[0]) + mylog.info("Allocating KDTree for %s particles", positions.shape[0]) self._kdtree = PyKDTree( positions.astype("float64"), left_edge=self.ds.domain_left_edge, diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 2575d51a890..b488e288204 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -1636,7 +1636,7 @@ def load_hexahedral_mesh( array_values = data[fn] if array_values.size != connectivity.shape[0]: mylog.error( - "Dimensions of array must be one fewer than the" + " coordinate set." + "Dimensions of array must be one fewer than the coordinate set." ) raise RuntimeError grid_left_edges = domain_left_edge @@ -2169,7 +2169,7 @@ def load_unstructured_mesh( array_values = data[fn] if array_values.size != connectivity.shape[0]: mylog.error( - "Dimensions of array must be one fewer than the" + " coordinate set." + "Dimensions of array must be one fewer than the coordinate set." 
) raise RuntimeError grid_left_edges = domain_left_edge diff --git a/yt/frontends/swift/data_structures.py b/yt/frontends/swift/data_structures.py index 86da058eeb8..716749a842a 100644 --- a/yt/frontends/swift/data_structures.py +++ b/yt/frontends/swift/data_structures.py @@ -128,9 +128,9 @@ def _parse_parameter_file(self): except KeyError: mylog.warn( ( - "Could not find cosmology information in Parameters," - + " despite having ran with -c signifying a cosmological" - + " run." + "Could not find cosmology information in Parameters, " + "despite having ran with -c signifying a cosmological " + "run." ) ) mylog.info("Setting up as a non-cosmological run. Check this!") diff --git a/yt/frontends/ytdata/utilities.py b/yt/frontends/ytdata/utilities.py index 4b4853cc5d6..985afb0754d 100644 --- a/yt/frontends/ytdata/utilities.py +++ b/yt/frontends/ytdata/utilities.py @@ -65,7 +65,7 @@ def save_as_dataset(ds, filename, data, field_types=None, extra_attrs=None): """ - mylog.info("Saving field data to yt dataset: %s." % filename) + mylog.info("Saving field data to yt dataset: %s.", filename) if extra_attrs is None: extra_attrs = {} diff --git a/yt/funcs.py b/yt/funcs.py index bd3462f40a4..19d7b15ad6a 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -238,14 +238,6 @@ def check_parallel_rank(*args, **kwargs): return check_parallel_rank -def rootloginfo(*args): - from yt.config import ytcfg - - if ytcfg.getint("yt", "__topcomm_parallel_rank") > 0: - return - mylog.info(*args) - - class VisibleDeprecationWarning(UserWarning): """Visible deprecation warning, adapted from NumPy diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index ae5d3979ce4..1b6bf7527f6 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -396,8 +396,10 @@ def _ortho_pixelize( weight_field=chunk[weight].in_units(wounits), ) mylog.info( - "Making a fixed resolution buffer of (%s) %d by %d" - % (weight, size[0], size[1]) + "Making a fixed resolution buffer of (%s) %d by %d", + weight, + size[0], + size[1], ) for chunk in proj_reg.chunks([], "io"): data_source._initialize_projected_units([weight], chunk) diff --git a/yt/startup_tasks.py b/yt/startup_tasks.py index 21e1233984e..6515a9401a1 100644 --- a/yt/startup_tasks.py +++ b/yt/startup_tasks.py @@ -25,7 +25,7 @@ def turn_on_parallelism(): except ImportError as e: mylog.error( "Warning: Attempting to turn on parallelism, " - + "but mpi4py import failed. Try pip install mpi4py." + "but mpi4py import failed. Try pip install mpi4py." 
) raise e # Now we have to turn on the parallelism from the perspective of the diff --git a/yt/utilities/amr_kdtree/amr_kdtools.py b/yt/utilities/amr_kdtree/amr_kdtools.py index 893c87453cd..f42e3a36f7f 100644 --- a/yt/utilities/amr_kdtree/amr_kdtools.py +++ b/yt/utilities/amr_kdtree/amr_kdtools.py @@ -4,7 +4,7 @@ def receive_and_reduce(comm, incoming_rank, image, add_to_front): - mylog.debug("Receiving image from %04i" % incoming_rank) + mylog.debug("Receiving image from %04i", incoming_rank) # mylog.debug( '%04i receiving image from %04i'%(self.comm.rank,back.owner)) arr2 = comm.recv_array(incoming_rank, incoming_rank).reshape( (image.shape[0], image.shape[1], image.shape[2]) @@ -35,11 +35,11 @@ def receive_and_reduce(comm, incoming_rank, image, add_to_front): def send_to_parent(comm, outgoing_rank, image): - mylog.debug("Sending image to %04i" % outgoing_rank) + mylog.debug("Sending image to %04i", outgoing_rank) comm.send_array(image, outgoing_rank, tag=comm.rank) def scatter_image(comm, root, image): - mylog.debug("Scattering from %04i" % root) + mylog.debug("Scattering from %04i", root) image = comm.mpi_bcast(image, root=root) return image diff --git a/yt/utilities/amr_kdtree/amr_kdtree.py b/yt/utilities/amr_kdtree/amr_kdtree.py index 8edd54d3046..b4407e5dacc 100644 --- a/yt/utilities/amr_kdtree/amr_kdtree.py +++ b/yt/utilities/amr_kdtree/amr_kdtree.py @@ -137,7 +137,7 @@ def check_tree(self): # Calculate the Volume vol = self.trunk.kd_sum_volume() - mylog.debug("AMRKDTree volume = %e" % vol) + mylog.debug("AMRKDTree volume = %e", vol) self.trunk.kd_node_check() def sum_cells(self, all_cells=False): @@ -629,7 +629,7 @@ def rebuild_tree_from_array( n.create_split(splitdims[i], splitposs[i]) mylog.info( - "AMRKDTree rebuilt, Final Volume: %e" % self.tree.trunk.kd_sum_volume() + "AMRKDTree rebuilt, Final Volume: %e", self.tree.trunk.kd_sum_volume() ) return self.tree.trunk diff --git a/yt/utilities/parallel_tools/parallel_analysis_interface.py b/yt/utilities/parallel_tools/parallel_analysis_interface.py index 224d6f4ea2d..b19075763f4 100644 --- a/yt/utilities/parallel_tools/parallel_analysis_interface.py +++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py @@ -60,10 +60,10 @@ def write_to_file(exc_type, exc, tb): def default_mpi_excepthook(exception_type, exception_value, tb): traceback.print_tb(tb) - mylog.error("%s: %s" % (exception_type.__name__, exception_value)) + mylog.error("%s: %s", exception_type.__name__, exception_value) comm = yt.communication_system.communicators[-1] if comm.size > 1: - mylog.error("Error occured on rank %d." 
% comm.rank) + mylog.error("Error occured on rank %d.", comm.rank) MPI.COMM_WORLD.Abort(1) diff --git a/yt/visualization/volume_rendering/interactive_vr_helpers.py b/yt/visualization/volume_rendering/interactive_vr_helpers.py index 9ee4d8591bf..54485c43a07 100644 --- a/yt/visualization/volume_rendering/interactive_vr_helpers.py +++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py @@ -70,7 +70,7 @@ def _render_opengl( Please specify a field in create_scene()""" % (field, dobj.ds) ) - mylog.info("Setting default field to %s" % field.__repr__()) + mylog.info("Setting default field to %s", field.__repr__()) if window_size is None: window_size = (1024, 1024) if cam_position is None: diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 09044838325..97fe84c0271 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -299,7 +299,7 @@ def off_axis_projection( data_source.ds.index if item is None: field = data_source.ds.field_list[0] - mylog.info("Setting default field to %s" % field.__repr__()) + mylog.info("Setting default field to %s", field.__repr__()) funits = data_source.ds._get_field_info(item).units diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 3136bb69daf..726f0beff8e 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1937,7 +1937,7 @@ def _setup_box_properties(self, width, center, unit_vectors): dy = self.width[1] offi = self.imi + 0.5 offj = self.imj + 0.5 - mylog.info("Mosaic offset: %f %f" % (offi, offj)) + mylog.info("Mosaic offset: %f %f", offi, offj) global_center = self.center self.center = self.origin self.center += offi * dx * self.orienter.unit_vectors[0] @@ -1981,7 +1981,7 @@ def snapshot(self, fn=None, clip_ratio=None, double_check=False, num_threads=0): self.initialize_source() self.imi, self.imj = xy - mylog.debug("Working on: %i %i" % (self.imi, self.imj)) + mylog.debug("Working on: %i %i", self.imi, self.imj) self._setup_box_properties( self.width, self.center, self.orienter.unit_vectors ) @@ -2370,8 +2370,8 @@ def __init__(self, *args, **kwargs): if self.disparity <= 0.0: self.disparity = self.width[0] / 1000.0 mylog.info( - "Warning: Invalid value of disparity; " - "now reset it to %f" % self.disparity + "Warning: Invalid value of disparity; " "now reset it to %f", + self.disparity, ) def get_sampler_args(self, image): diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 871bfd98862..a06abd214dc 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -451,7 +451,7 @@ def render(self, camera, zbuffer=None): raise RuntimeError for brick in self.volume.traverse(camera.lens.viewpoint): - mylog.debug("Using sampler %s" % self.sampler) + mylog.debug("Using sampler %s", self.sampler) self.sampler(brick, num_threads=self.num_threads) total_cells += np.prod(brick.my_data[0].shape) mylog.debug("Done casting rays") diff --git a/yt/visualization/volume_rendering/transfer_function_helper.py b/yt/visualization/volume_rendering/transfer_function_helper.py index 9fed8b76eb2..d022f6da2fa 100644 --- a/yt/visualization/volume_rendering/transfer_function_helper.py +++ b/yt/visualization/volume_rendering/transfer_function_helper.py @@ -106,8 +106,8 
@@ def build_transfer_function(self): """ if self.bounds is None: mylog.info( - "Calculating data bounds. This may take a while." - + " Set the TransferFunctionHelper.bounds to avoid this." + "Calculating data bounds. This may take a while. " + "Set the TransferFunctionHelper.bounds to avoid this." ) self.set_bounds() diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index 6983484ebfd..b0d828e1c18 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -734,8 +734,7 @@ def sample_colormap(self, v, w, alpha=None, colormap="gist_stern", col_bounds=No alpha = a self.add_gaussian(v, w, [r, g, b, alpha]) mylog.debug( - "Adding gaussian at %s with width %s and colors %s" - % (v, w, (r, g, b, alpha)) + "Adding gaussian at %s with width %s and colors %s", v, w, (r, g, b, alpha) ) def map_to_colormap( @@ -978,7 +977,7 @@ def __init__( # Now we set up the scattering scat = (johnson_filters[f]["Lchar"] ** -4 / mscat) * anorm tf = TransferFunction(rho_bounds) - mylog.debug("Adding: %s with relative scattering %s" % (f, scat)) + mylog.debug("Adding: %s with relative scattering %s", f, scat) tf.y *= 0.0 tf.y += scat self.add_field_table(tf, 1, weight_field_id=1) diff --git a/yt/visualization/volume_rendering/volume_rendering.py b/yt/visualization/volume_rendering/volume_rendering.py index 58278ffa375..0324077a5db 100644 --- a/yt/visualization/volume_rendering/volume_rendering.py +++ b/yt/visualization/volume_rendering/volume_rendering.py @@ -59,7 +59,7 @@ def create_scene(data_source, field=None, lens_type="plane-parallel"): Please specify a field in create_scene()""" % (field, data_source.ds) ) - mylog.info("Setting default field to %s" % field.__repr__()) + mylog.info("Setting default field to %s", field.__repr__()) if hasattr(data_source.ds.index, "meshes"): source = MeshSource(data_source, field=field) From 323ac4ddd4e99d6b951666736d4e9b03b6cfa21e Mon Sep 17 00:00:00 2001 From: convert-repo Date: Thu, 6 Aug 2020 16:08:27 +0200 Subject: [PATCH 310/653] Third pass ! 
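This pass continues the logging cleanup from the preceding commits: deprecated
log.warn calls are switched to log.warning, and eager %-interpolation or string
concatenation inside logging calls is replaced with lazy argument passing, so
the message is only formatted when the record is actually emitted. A minimal
sketch of the before/after shape of a typical hunk in this pass (taken from the
particle_filters.py change below):

    # eager: the string is built up front, even if the record is filtered out
    mylog.warning("The %s particle filter already exists. Overriding." % name)

    # lazy: the logging module interpolates the arguments only on emission
    mylog.warning("The %s particle filter already exists. Overriding.", name)
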
--- setupext.py | 18 +++++++++--------- yt/data_objects/data_containers.py | 9 +++++---- yt/data_objects/particle_filters.py | 2 +- yt/data_objects/selection_data_containers.py | 2 +- yt/data_objects/static_output.py | 2 +- yt/fields/field_info_container.py | 2 +- yt/frontends/arepo/data_structures.py | 2 +- yt/frontends/athena/data_structures.py | 2 +- yt/frontends/enzo/simulation_handling.py | 2 +- yt/frontends/fits/data_structures.py | 16 ++++++++++------ yt/frontends/flash/data_structures.py | 8 +++++--- yt/frontends/gadget/io.py | 4 ++-- yt/frontends/gadget/simulation_handling.py | 2 +- yt/frontends/gadget_fof/io.py | 6 ++++-- yt/frontends/gamer/data_structures.py | 2 +- yt/frontends/gdf/data_structures.py | 4 ++-- yt/frontends/owls_subfind/io.py | 6 ++++-- yt/frontends/ramses/data_structures.py | 6 ++++-- yt/frontends/ramses/particle_handlers.py | 6 +++--- yt/frontends/sdf/io.py | 10 ++++++---- yt/frontends/stream/data_structures.py | 13 +++++++------ yt/frontends/swift/data_structures.py | 2 +- yt/funcs.py | 2 +- .../coordinates/cartesian_coordinates.py | 4 ++-- yt/visualization/eps_writer.py | 8 ++++---- yt/visualization/fits_image.py | 11 +++++------ yt/visualization/plot_container.py | 4 ++-- yt/visualization/plot_window.py | 12 +++++------- yt/visualization/profile_plotter.py | 5 +++-- .../volume_rendering/render_source.py | 8 ++------ 30 files changed, 95 insertions(+), 85 deletions(-) diff --git a/setupext.py b/setupext.py index fc5809b30b5..18690f25b26 100644 --- a/setupext.py +++ b/setupext.py @@ -111,13 +111,13 @@ def check_for_openmp(): if len(output) == nthreads: using_openmp = True else: - log.warn( + log.warning( "Unexpected number of lines from output of test " "OpenMP program (output was {0})".format(output) ) using_openmp = False else: - log.warn( + log.warning( "Unexpected output from test OpenMP " "program (output was {0})".format(output) ) @@ -129,9 +129,9 @@ def check_for_openmp(): os.chdir(start_dir) if using_openmp: - log.warn("Using OpenMP to compile parallel extensions") + log.warning("Using OpenMP to compile parallel extensions") else: - log.warn( + log.warning( "Unable to compile OpenMP test program so Cython\n" "extensions will be compiled without parallel support" ) @@ -227,18 +227,18 @@ def read_embree_location(): exit_code = p.returncode if exit_code != 0: - log.warn( + log.warning( "Pyembree is installed, but I could not compile Embree " "test code." ) - log.warn("The error message was: ") - log.warn(err) - log.warn(fail_msg) + log.warning("The error message was: ") + log.warning(err) + log.warning(fail_msg) # Clean up file.close() except OSError: - log.warn( + log.warning( "read_embree_location() could not find your C compiler. " "Attempted to use '%s'. 
" % compiler ) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index ca6ba6dadd9..91c6dab0b30 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -860,7 +860,7 @@ def create_firefly_object( for field in this_ptype_fields: if field not in fields_to_include: mylog.warning( - "detected (but did not request) {} {}".format(ptype, field) + "detected (but did not request) %s %s", ptype, field ) ## you must have velocities (and they must be named "Velocities") @@ -1755,11 +1755,12 @@ def _generate_fields(self, fields_to_generate): raise YTDimensionalityError(fi.dimensions, dimensions) fi.units = units self.field_data[field] = self.ds.arr(fd, units) - msg = ( + mylog.warning( "Field %s was added without specifying units, " - "assuming units are %s" + "assuming units are %s", + fi.name, + units, ) - mylog.warning(msg % (fi.name, units)) try: fd.convert_to_units(fi.units) except AttributeError: diff --git a/yt/data_objects/particle_filters.py b/yt/data_objects/particle_filters.py index 28295ac87f3..40f9f08fe7d 100644 --- a/yt/data_objects/particle_filters.py +++ b/yt/data_objects/particle_filters.py @@ -120,7 +120,7 @@ def add_particle_filter(name, function, requires=None, filtered_type="all"): requires = [] filter = ParticleFilter(name, function, requires, filtered_type) if filter_registry.get(name, None) is not None: - mylog.warning("The %s particle filter already exists. Overriding." % name) + mylog.warning("The %s particle filter already exists. Overriding.", name) filter_registry[name] = filter diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index d7b8760665f..4c501862e5c 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -239,7 +239,7 @@ def __init__( if (self.start_point < self.ds.domain_left_edge).any() or ( self.end_point > self.ds.domain_right_edge ).any(): - mylog.warn( + mylog.warning( "Ray start or end is outside the domain. " "Returned data will only be for the ray section inside the domain." ) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index cddac19ef77..a45e3f62c5a 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1500,7 +1500,7 @@ def add_deposited_particle_field( if method == "count": field_name = "%s_count" % ptype if ("deposit", field_name) in self.field_info: - mylog.warning("The deposited field %s already exists" % field_name) + mylog.warning("The deposited field %s already exists", field_name) return ("deposit", field_name) else: units = "dimensionless" diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index 561371656b2..102bacf743b 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -214,7 +214,7 @@ def setup_fluid_aliases(self, ftype="gas"): and units != 1.0 ): mylog.warning( - "Cannot interpret units: %s * %s, " + "setting to dimensionless.", + "Cannot interpret units: %s * %s, setting to dimensionless.", units, args[0], ) diff --git a/yt/frontends/arepo/data_structures.py b/yt/frontends/arepo/data_structures.py index 649d6057444..fc6ca2a877d 100644 --- a/yt/frontends/arepo/data_structures.py +++ b/yt/frontends/arepo/data_structures.py @@ -68,7 +68,7 @@ def _get_uvals(self): if unit in handle["/Header"].attrs: uvals[unit] = handle["/Header"].attrs[unit] else: - mylog.warning("Arepo header is missing %s!" 
% unit) + mylog.warning("Arepo header is missing %s!", unit) missing = True handle.close() if missing: diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index a50ec2fce73..fbd8a3241de 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -493,7 +493,7 @@ def __init__( if not already_warned: mylog.warning( "Supplying unit conversions from the parameters dict is deprecated, " - + "and will be removed in a future release. Use units_override instead." + "and will be removed in a future release. Use units_override instead." ) already_warned = True units_override[k] = self.specified_parameters.pop(k) diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index f4807fbf6f7..e557dbacd1c 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -567,7 +567,7 @@ def _calculate_simulation_bounds(self): if self.final_time is None: mylog.warning( "Simulation %s has no stop time set, stopping condition " - + "will be based only on cycles.", + "will be based only on cycles.", self.parameter_filename, ) diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index 1d814a60f14..72b6f039590 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -73,7 +73,7 @@ def _guess_name_from_units(self, units): for k, v in field_from_unit.items(): if k in units: mylog.warning( - "Guessing this is a %s field based on its units of %s." % (v, k) + "Guessing this is a %s field based on its units of %s.", v, k ) return v return None @@ -151,9 +151,11 @@ def _detect_output_fields(self): dup_field_index[fname] = 1 mylog.warning( "This field has the same name as a previously loaded " - + "field. Changing the name from %s to %s_%d. To avoid " - % (fname, fname, dup_field_index[fname]) - + " this, change one of the BTYPE header keywords." + "field. Changing the name from %s to %s_%d. To avoid " + "this, change one of the BTYPE header keywords.", + fname, + fname, + dup_field_index[fname], ) fname += "_%d" % (dup_field_index[fname]) for k in range(naxis4): @@ -600,8 +602,10 @@ def _set_code_unit_attributes(self): # Give up otherwise u = self.quan(1.0, cgs) mylog.warning( - "No unit for %s found. Assuming 1.0 code_%s = 1.0 %s" - % (unit, unit, cgs) + "No unit for %s found. Assuming 1.0 code_%s = 1.0 %s", + unit, + unit, + cgs, ) setdefaultattr(self, "%s_unit" % unit, u) diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 955a5caf9a9..2af37394ebe 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -209,8 +209,9 @@ def __init__( self._particle_handle = self._handle mylog.warning( "%s and %s are not at the same time. " - % (self.particle_filename, filename) - + "This particle file will not be used." 
+ "This particle file will not be used.", + self.particle_filename, + filename, ) # These should be explicitly obtained from the file, but for now that @@ -403,7 +404,8 @@ def _parse_parameter_file(self): if dle[d] == dre[d]: mylog.warning( "Identical domain left edge and right edges " - "along dummy dimension (%i), attempting to read anyway" % d + "along dummy dimension (%i), attempting to read anyway", + d, ) dre[d] = dle[d] + 1.0 if self.dimensionality < 3 and self.geometry == "cylindrical": diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 8f74bc678e3..c0168d38a9d 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -538,8 +538,8 @@ def _calculate_field_offsets( possible.append(ptype) mylog.warning( "Your Gadget-2 file may have extra " - + "columns or different precision! " - + "(%s diff => %s?)", + "columns or different precision! " + "(%s diff => %s?)", diff, possible, ) diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 83195670625..b5a62b644f2 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -284,7 +284,7 @@ def get_time_series( if len(init_outputs) == 0 and len(my_outputs) > 0: mylog.warning( "Could not find any datasets. " - + "Check the value of OutputDir in your parameter file." + "Check the value of OutputDir in your parameter file." ) DatasetSeries.__init__( diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index 72f58d81092..5bf4bd22917 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -379,8 +379,10 @@ def subfind_field_list(fh, ptype, pcount): offset_fields.append(fname) else: mylog.warning( - "Cannot add field (%s, %s) with size %d." - % (ptype, fh[field].name, fh[field].size) + "Cannot add field (%s, %s) with size %d.", + ptype, + fh[field].name, + fh[field].size, ) continue return fields, offset_fields diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index d9469991b27..e45aa626315 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -262,7 +262,7 @@ def _set_code_unit_attributes(self): if len(self.units_override) == 0: mylog.warning( "Cannot determine code units ==> " - + "Use units_override to specify the units" + "Use units_override to specify the units" ) for unit, value, cgs in [ diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index ea775907d5f..859b5b7e7b9 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -221,8 +221,8 @@ def _set_code_unit_attributes(self): if unit_name in h5f["/field_types"]: if unit_name in self.field_units: mylog.warning( - "'field_units' was overridden by 'dataset_units/%s'" - % (unit_name) + "'field_units' was overridden by 'dataset_units/%s'", + unit_name, ) self.field_units[unit_name] = str(unit) else: diff --git a/yt/frontends/owls_subfind/io.py b/yt/frontends/owls_subfind/io.py index d8a27da11db..0aee50920e4 100644 --- a/yt/frontends/owls_subfind/io.py +++ b/yt/frontends/owls_subfind/io.py @@ -221,8 +221,10 @@ def subfind_field_list(fh, ptype, pcount): offset_fields.append(fname) else: mylog.warning( - "Cannot add field (%s, %s) with size %d." 
- % (ptype, fh[field].name, fh[field].size) + "Cannot add field (%s, %s) with size %d.", + ptype, + fh[field].name, + fh[field].size, ) continue return fields, offset_fields diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index e944c16c682..5b556d0c88f 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -204,7 +204,9 @@ def __init__( if num_ghost_zones > 0: if not all(ds.periodicity): - mylog.warn("Ghost zones will wrongly assume the domain to be periodic.") + mylog.warning( + "Ghost zones will wrongly assume the domain to be periodic." + ) # Create a base domain *with no self._base_domain.fwidth base_domain = RAMSESDomainSubset( ds.all_data(), domain, ds, over_refine_factor @@ -764,7 +766,7 @@ def read_namelist(self): except ImportError as e: nml = "An error occurred when reading the namelist: %s" % str(e) except (ValueError, StopIteration) as e: - mylog.warn( + mylog.warning( "Could not parse `namelist.txt` file as it was malformed: %s", e ) return diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 340b6a413bc..ee16d5d1e24 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -271,12 +271,12 @@ def read_header(self): fd.close() if iextra > 0 and not self.ds._warned_extra_fields["io"]: - w = ( + mylog.warning( "Detected %s extra particle fields assuming kind " "`double`. Consider using the `extra_particle_fields` " - "keyword argument if you have unexpected behavior." + "keyword argument if you have unexpected behavior.", + iextra, ) - mylog.warning(w % iextra) self.ds._warned_extra_fields["io"] = True self.field_offsets = field_offsets diff --git a/yt/frontends/sdf/io.py b/yt/frontends/sdf/io.py index 4398acf44bd..f13370d8637 100644 --- a/yt/frontends/sdf/io.py +++ b/yt/frontends/sdf/io.py @@ -92,8 +92,9 @@ def _count_particles(self, data_file): pcount = self._handle["x"].size if pcount > 1e9: mylog.warning( - "About to load %i particles into memory. " % (pcount) - + "You may want to consider a midx-enabled load" + "About to load %i particles into memory. " + "You may want to consider a midx-enabled load", + pcount, ) return {"dark_matter": pcount} @@ -236,8 +237,9 @@ def _count_particles(self, data_file): pcount_estimate = self.ds.midx.get_nparticles_bbox(dle, dre) if pcount_estimate > 1e9: mylog.warning( - "Filtering %i particles to find total." % pcount_estimate - + " You may want to reconsider your bounding box." + "Filtering %i particles to find total. " + "You may want to reconsider your bounding box.", + pcount_estimate, ) pcount = 0 for dd in self.ds.midx.iter_bbox_data(dle, dre, ["x"]): diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index b488e288204..1270658d5a9 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -454,10 +454,6 @@ def assign_particle_data(ds, pdata, bbox): num_particles = particle_grid_inds.size num_unassigned = num_particles - assigned_particles.size if num_unassigned > 0: - m = ( - "Discarding %s particles (out of %s) that are outside " - "bounding box. " - ) eps = np.finfo(x.dtype).eps s = np.array( [ @@ -471,8 +467,13 @@ def assign_particle_data(ds, pdata, bbox): [min(bbox[1, 0], s[1, 0]), max(bbox[1, 1], s[1, 1])], [min(bbox[2, 0], s[2, 0]), max(bbox[2, 1], s[2, 1])], ] - m += "Set bbox=%s to avoid this in the future." 
- mylog.warning(m % (num_unassigned, num_particles, sug_bbox)) + mylog.warning( + "Discarding %s particles (out of %s) that are outside " + "bounding box. Set bbox=%s to avoid this in the future.", + num_unassigned, + num_particles, + sug_bbox, + ) particle_grid_inds = particle_grid_inds[assigned_particles] x = x[assigned_particles] y = y[assigned_particles] diff --git a/yt/frontends/swift/data_structures.py b/yt/frontends/swift/data_structures.py index 716749a842a..f40a780e191 100644 --- a/yt/frontends/swift/data_structures.py +++ b/yt/frontends/swift/data_structures.py @@ -126,7 +126,7 @@ def _parse_parameter_file(self): # This is "little h" self.hubble_constant = float(parameters["Cosmology:h"]) except KeyError: - mylog.warn( + mylog.warning( ( "Could not find cosmology information in Parameters, " "despite having ran with -c signifying a cosmological " diff --git a/yt/funcs.py b/yt/funcs.py index 19d7b15ad6a..6c0365304c3 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -918,7 +918,7 @@ def get_image_suffix(name): if suffix in supported_suffixes or suffix == "": return suffix else: - mylog.warning("Unsupported image suffix requested (%s)" % suffix) + mylog.warning("Unsupported image suffix requested (%s)", suffix) return "" diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 1b6bf7527f6..32b8f0d71a4 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -201,7 +201,7 @@ def pixelize( elif field_data.shape[1] == 27: # hexahedral mylog.warning( - "High order elements not yet supported, " + "dropping to 1st order." + "High order elements not yet supported, dropping to 1st order." ) field_data = field_data[:, 0:8] indices = indices[:, 0:8] @@ -257,7 +257,7 @@ def pixelize_line(self, field, start_point, end_point, npoints): if field_data.shape[1] == 27: # hexahedral mylog.warning( - "High order elements not yet supported, " + "dropping to 1st order." + "High order elements not yet supported, dropping to 1st order." ) field_data = field_data[:, 0:8] indices = indices[:, 0:8] diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index 2b2b517773c..9f2ddc1ef56 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -607,12 +607,12 @@ def insert_image_yt(self, plot, field=None, pos=(0, 0), scale=1.0): if field is None: self.field = list(plot.plots.keys())[0] mylog.warning( - "No field specified. Choosing first field (%s)" % str(self.field) + "No field specified. Choosing first field (%s)", self.field ) else: self.field = plot.data_source._determine_fields(field)[0] if self.field not in plot.plots.keys(): - raise RuntimeError("Field '%s' does not exist!" % str(self.field)) + raise RuntimeError("Field '%s' does not exist!", self.field) if isinstance(plot, PlotWindow): plot.hide_colorbar() plot.hide_axes() @@ -1447,8 +1447,8 @@ def multiplot( xpos = xpos0 else: mylog.warning( - "Unknown colorbar location %s. " - "No colorbar displayed." % orientation + "Unknown colorbar location %s. 
No colorbar displayed.", + orientation, ) orientation = None # Marker for interior plot diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index 2e7095d6654..7baba6435a3 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -397,16 +397,15 @@ def _set_units(self, ds, base_units): if uq is not None and uq.units.is_code_unit: mylog.warning( - "Cannot use code units of '%s' " % uq.units - + "when creating a FITSImageData instance! " - "Converting to a cgs equivalent." + "Cannot use code units of '%s' " + "when creating a FITSImageData instance! " + "Converting to a cgs equivalent.", + uq.units, ) uq.convert_to_cgs() if attr == "length_unit" and uq.value != 1.0: - mylog.warning( - "Converting length units " "from %s to %s." % (uq, uq.units) - ) + mylog.warning("Converting length units " "from %s to %s.", uq, uq.units) uq = YTQuantity(1.0, uq.units) setattr(self, attr, uq) diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index 92abaabef92..abdda1bd9a6 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -911,8 +911,8 @@ def _sanitize_units(z, _field): except AttributeError: # only certain subclasses have a frb attribute they can rely on for inspecting units mylog.warning( - "%s class doesn't support zmin/zmax set as tuples or YTQuantity" - % self.__class__.__name__ + "%s class doesn't support zmin/zmax set as tuples or YTQuantity", + self.__class__.__name__, ) z = z.value return z diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 4620665d63b..834483cd74f 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -855,7 +855,7 @@ def _setup_origin(self): elif origin[2] == "native": return (self.ds.quan(0.0, "code_length"), self.ds.quan(0.0, "code_length")) else: - mylog.warning("origin = {0}".format(origin)) + mylog.warning("origin = %s", origin) msg = ( 'origin keyword "{0}" not recognized, must declare "domain" ' 'or "center" as the last term in origin.' @@ -869,7 +869,7 @@ def _setup_origin(self): elif origin[0] == "center": yc = (yllim + yrlim) / 2.0 else: - mylog.warning("origin = {0}".format(origin)) + mylog.warning("origin = %s", origin) msg = ( 'origin keyword "{0}" not recognized, must declare "lower" ' '"upper" or "center" as the first term in origin.' @@ -884,7 +884,7 @@ def _setup_origin(self): elif origin[1] == "center": xc = (xllim + xrlim) / 2.0 else: - mylog.warning("origin = {0}".format(origin)) + mylog.warning("origin = %s", origin) msg = ( 'origin keyword "{0}" not recognized, must declare "left" ' '"right" or "center" as the second term in origin.' @@ -2376,22 +2376,20 @@ def SlicePlot(ds, normal=None, fields=None, axis=None, *args, **kwargs): if iterable(normal) and not isinstance(normal, str): # OffAxisSlicePlot has hardcoded origin; remove it if in kwargs if "origin" in kwargs: - msg = ( + mylog.warning( "Ignoring 'origin' keyword as it is ill-defined for " "an OffAxisSlicePlot object." ) - mylog.warning(msg) del kwargs["origin"] return OffAxisSlicePlot(ds, normal, fields, *args, **kwargs) else: # north_vector not used in AxisAlignedSlicePlots; remove it if in kwargs if "north_vector" in kwargs: - msg = ( + mylog.warning( "Ignoring 'north_vector' keyword as it is ill-defined for " "an AxisAlignedSlicePlot object." 
) - mylog.warning(msg) del kwargs["north_vector"] return AxisAlignedSlicePlot(ds, normal, fields, *args, **kwargs) diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index bec52d51dfa..87d5e2c2c27 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -1074,8 +1074,9 @@ def _setup_plots(self): positive_values = data[data > 0.0] if len(positive_values) == 0: mylog.warning( - "Profiled field %s has no positive " - "values. Max = %f." % (f, np.nanmax(data)) + "Profiled field %s has no positive " "values. Max = %f.", + f, + np.nanmax(data), ) mylog.warning("Switching to linear colorbar scaling.") zmin = np.nanmin(data) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index a06abd214dc..62915fde571 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -631,9 +631,7 @@ def build_volume_embree(self): # to 1st order here, for now. if indices.shape[1] == 27: # hexahedral - mylog.warning( - "27-node hexes not yet supported, " + "dropping to 1st order." - ) + mylog.warning("27-node hexes not yet supported, dropping to 1st order.") field_data = field_data[:, 0:8] indices = indices[:, 0:8] @@ -664,9 +662,7 @@ def build_volume_bvh(self): # low-order geometry. if indices.shape[1] == 27: # hexahedral - mylog.warning( - "27-node hexes not yet supported, " + "dropping to 1st order." - ) + mylog.warning("27-node hexes not yet supported, dropping to 1st order.") field_data = field_data[:, 0:8] indices = indices[:, 0:8] From 420707513daa2ffa1986e4fcee7d61532c69e65c Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 6 Aug 2020 16:09:39 +0200 Subject: [PATCH 311/653] Adding myself to git-blame-ignore-revs --- .git-blame-ignore-revs | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index a88bbbf6bb1..b174b700f8b 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -2,8 +2,13 @@ 7edfcee093cca277307aabdb180e0ffc69768291 81418e459f16c48d6b7a75d6ef8035dfe9651b39 -# transisiton to black +# transition to black ebadee629414aed2c7b6526e22a419205329ec38 # automated trailing whitespace removal 3ee548b04a41dfbc009921c492fba6a0682651ca + +# converting to f-strings +ad898e8e3954bc348daaa449d5ed73db778785e9 +ef51ad5199692afcf1a8ab491aa115c00c423113 +323ac4ddd4e99d6b951666736d4e9b03b6cfa21e \ No newline at end of file From d501227e39707291c1998c3a19db4c7e8748934f Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 6 Aug 2020 16:19:48 +0200 Subject: [PATCH 312/653] Fix setupext.py --- setupext.py | 27 ++++++++++++++------------- 1 file changed, 14 insertions(+), 13 deletions(-) diff --git a/setupext.py b/setupext.py index 18690f25b26..463e20791dd 100644 --- a/setupext.py +++ b/setupext.py @@ -111,15 +111,15 @@ def check_for_openmp(): if len(output) == nthreads: using_openmp = True else: - log.warning( + log.warn( "Unexpected number of lines from output of test " - "OpenMP program (output was {0})".format(output) + "OpenMP program (output was %s)", + output, ) using_openmp = False else: - log.warning( - "Unexpected output from test OpenMP " - "program (output was {0})".format(output) + log.warn( + "Unexpected output from test OpenMP " "program (output was %s)", output ) using_openmp = False @@ -129,9 +129,9 @@ def check_for_openmp(): os.chdir(start_dir) if using_openmp: - log.warning("Using OpenMP to compile parallel 
extensions") + log.warn("Using OpenMP to compile parallel extensions") else: - log.warning( + log.warn( "Unable to compile OpenMP test program so Cython\n" "extensions will be compiled without parallel support" ) @@ -227,20 +227,21 @@ def read_embree_location(): exit_code = p.returncode if exit_code != 0: - log.warning( + log.warn( "Pyembree is installed, but I could not compile Embree " "test code." ) - log.warning("The error message was: ") - log.warning(err) - log.warning(fail_msg) + log.warn("The error message was: ") + log.warn(err) + log.warn(fail_msg) # Clean up file.close() except OSError: - log.warning( + log.warn( "read_embree_location() could not find your C compiler. " - "Attempted to use '%s'. " % compiler + "Attempted to use '%s'.", + compiler, ) return False From 4b381dce230e2f80291960f68e1146576c86601d Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 6 Aug 2020 17:15:13 +0200 Subject: [PATCH 313/653] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- setupext.py | 2 +- yt/visualization/volume_rendering/old_camera.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/setupext.py b/setupext.py index 463e20791dd..193495aa953 100644 --- a/setupext.py +++ b/setupext.py @@ -119,7 +119,7 @@ def check_for_openmp(): using_openmp = False else: log.warn( - "Unexpected output from test OpenMP " "program (output was %s)", output + "Unexpected output from test OpenMP program (output was %s)", output ) using_openmp = False diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 726f0beff8e..72de7d0d8d4 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -2370,7 +2370,7 @@ def __init__(self, *args, **kwargs): if self.disparity <= 0.0: self.disparity = self.width[0] / 1000.0 mylog.info( - "Warning: Invalid value of disparity; " "now reset it to %f", + "Warning: Invalid value of disparity; now reset it to %f", self.disparity, ) From 0431c7fac5a253dbea5bea2f23dfbcbd471bebe7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 19:46:50 +0200 Subject: [PATCH 314/653] remove unused module --- yt/geometry/object_finding_mixin.py | 289 ---------------------------- 1 file changed, 289 deletions(-) delete mode 100644 yt/geometry/object_finding_mixin.py diff --git a/yt/geometry/object_finding_mixin.py b/yt/geometry/object_finding_mixin.py deleted file mode 100644 index 4688d7c61d3..00000000000 --- a/yt/geometry/object_finding_mixin.py +++ /dev/null @@ -1,289 +0,0 @@ -import numpy as np - -from yt.config import ytcfg -from yt.funcs import ensure_list, ensure_numpy_array, mylog -from yt.geometry.grid_container import GridTree, MatchPointsToGrids -from yt.utilities.exceptions import YTTooParallel -from yt.utilities.lib.misc_utilities import get_box_grids_below_level -from yt.utilities.physical_ratios import HUGE - - -class ObjectFindingMixin: - def find_ray_grids(self, coord, axis): - """ - Returns the (objects, indices) of grids that an (x,y) ray intersects - along *axis* - """ - # Let's figure out which grids are on the slice - mask = np.ones(self.num_grids) - # So if gRE > coord, we get a mask, if not, we get a zero - # if gLE > coord, we get a zero, if not, mask - # Thus, if the coordinate is between the two edges, we win! 
- xax = self.ds.coordinates.x_axis[axis] - yax = self.ds.coordinates.y_axis[axis] - np.choose(np.greater(self.grid_right_edge[:, xax], coord[0]), (0, mask), mask) - np.choose(np.greater(self.grid_left_edge[:, xax], coord[0]), (mask, 0), mask) - np.choose(np.greater(self.grid_right_edge[:, yax], coord[1]), (0, mask), mask) - np.choose(np.greater(self.grid_left_edge[:, yax], coord[1]), (mask, 0), mask) - ind = np.where(mask == 1) - return self.grids[ind], ind - - def find_max(self, field, finest_levels=3): - """ - Returns (value, center) of location of maximum for a given field. - """ - if (field, finest_levels) in self._max_locations: - return self._max_locations[(field, finest_levels)] - mv, pos = self.find_max_cell_location(field, finest_levels) - self._max_locations[(field, finest_levels)] = (mv, pos) - return mv, pos - - def find_max_cell_location(self, field, finest_levels=3): - if finest_levels is not False: - # This prevents bad values for the case that the number of grids to - # search is smaller than the number of processors being applied to - # the task, by - nproc = ytcfg.getint("yt", "__topcomm_parallel_size") - while True: - gi = (self.grid_levels >= self.max_level - finest_levels).ravel() - if gi.sum() >= nproc: - break - elif finest_levels >= self.max_level: - raise YTTooParallel - else: - finest_levels += 1 - - source = self.grid_collection([0.0] * 3, self.grids[gi]) - else: - source = self.all_data() - mylog.debug( - "Searching %s grids for maximum value of %s", len(source._grids), field - ) - max_val, mx, my, mz = source.quantities["MaxLocation"](field) - mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", max_val, mx, my, mz) - self.parameters["Max%sValue" % (field)] = max_val - self.parameters["Max%sPos" % (field)] = "%s" % ((mx, my, mz),) - return max_val, np.array((mx, my, mz), dtype="float64") - - def find_min(self, field): - """ - Returns (value, center) of location of minimum for a given field - """ - gI = np.where(self.grid_levels >= 0) # Slow but pedantic - minVal = HUGE - for grid in self.grids[gI[0]]: - mylog.debug("Checking %s (level %s)", grid.id, grid.Level) - val, coord = grid.find_min(field) - if val < minVal: - minCoord = coord - minVal = val - minGrid = grid - mc = np.array(minCoord) - pos = minGrid.get_position(mc) - mylog.info( - "Min Value is %0.5e at %0.16f %0.16f %0.16f in grid %s at level %s", - minVal, - pos[0], - pos[1], - pos[2], - minGrid, - minGrid.Level, - ) - self.center = pos - self.parameters["Min%sValue" % (field)] = minVal - self.parameters["Min%sPos" % (field)] = "%s" % (pos) - return minVal, pos - - def find_point(self, coord): - """ - Returns the (objects, indices) of grids containing an (x,y,z) point - """ - mask = np.ones(self.num_grids) - for i in range(len(coord)): - np.choose(np.greater(self.grid_left_edge[:, i], coord[i]), (mask, 0), mask) - np.choose(np.greater(self.grid_right_edge[:, i], coord[i]), (0, mask), mask) - ind = np.where(mask == 1) - return self.grids[ind], ind - - def find_points(self, x, y, z): - """ - Returns the (objects, indices) of leaf grids containing a number of (x,y,z) points - """ - x = ensure_numpy_array(x) - y = ensure_numpy_array(y) - z = ensure_numpy_array(z) - if not len(x) == len(y) == len(z): - raise AssertionError("Arrays of indices must be of the same size") - - grid_tree = self.get_grid_tree() - pts = MatchPointsToGrids(grid_tree, len(x), x, y, z) - ind = pts.find_points_in_tree() - return self.grids[ind], ind - - def find_field_value_at_point(self, fields, coord): - r"""Find the value of 
fields at a point. - - Returns the values [field1, field2,...] of the fields at the given - (x,y,z) point. Returns a list of field values in the same order - as the input *fields*. - - Parameters - ---------- - fields : string or list of strings - The field(s) that will be returned. - - coord : list or array of floats - The location for which field values will be returned. - - Examples - -------- - >>> ds.h.find_field_value_at_point(['Density', 'Temperature'], - [0.4, 0.3, 0.8]) - [2.1489e-24, 1.23843e4] - """ - # Get the most-refined grid at this coordinate. - this = self.find_point(coord)[0][-1] - cellwidth = (this.RightEdge - this.LeftEdge) / this.ActiveDimensions - mark = np.zeros(3).astype("int") - # Find the index for the cell containing this point. - for dim in range(len(coord)): - mark[dim] = int((coord[dim] - this.LeftEdge[dim]) / cellwidth[dim]) - out = [] - fields = ensure_list(fields) - # Pull out the values and add it to the out list. - for field in fields: - out.append(this[field][mark[0], mark[1], mark[2]]) - return out - - def find_slice_grids(self, coord, axis): - """ - Returns the (objects, indices) of grids that a slice intersects along - *axis* - """ - # Let's figure out which grids are on the slice - mask = np.ones(self.num_grids) - # So if gRE > coord, we get a mask, if not, we get a zero - # if gLE > coord, we get a zero, if not, mask - # Thus, if the coordinate is between the edges, we win! - np.choose(np.greater(self.grid_right_edge[:, axis], coord), (0, mask), mask) - np.choose(np.greater(self.grid_left_edge[:, axis], coord), (mask, 0), mask) - ind = np.where(mask == 1) - return self.grids[ind], ind - - def find_sphere_grids(self, center, radius): - """ - Returns objects, indices of grids within a sphere - """ - centers = (self.grid_right_edge + self.grid_left_edge) / 2.0 - long_axis = np.maximum.reduce(self.grid_right_edge - self.grid_left_edge, 1) - t = np.abs(centers - center) - DW = self.dataset.domain_right_edge - self.dataset.domain_left_edge - np.minimum(t, np.abs(DW - t), t) - dist = np.sqrt(np.sum((t ** 2.0), axis=1)) - gridI = np.where(dist < (radius + long_axis)) - return self.grids[gridI], gridI - - def get_box_grids(self, left_edge, right_edge): - """ - Gets back all the grids between a left edge and right edge - """ - eps = np.finfo(np.float64).eps - grid_i = np.where( - ( - np.all((self.grid_right_edge - left_edge) > eps, axis=1) - & np.all((right_edge - self.grid_left_edge) > eps, axis=1) - ) - ) - - return self.grids[grid_i], grid_i - - def get_periodic_box_grids(self, left_edge, right_edge): - mask = np.zeros(self.grids.shape, dtype="bool") - dl = self.dataset.domain_left_edge - dr = self.dataset.domain_right_edge - left_edge = np.array(left_edge) - right_edge = np.array(right_edge) - dw = dr - dl - left_dist = left_edge - dl - db = right_edge - left_edge - for off_x in [-1, 0, 1]: - nle = left_edge.copy() - nle[0] = (dw[0] * off_x + dl[0]) + left_dist[0] - for off_y in [-1, 0, 1]: - nle[1] = (dw[1] * off_y + dl[1]) + left_dist[1] - for off_z in [-1, 0, 1]: - nle[2] = (dw[2] * off_z + dl[2]) + left_dist[2] - nre = nle + db - g, gi = self.get_box_grids(nle, nre) - mask[gi] = True - return self.grids[mask], np.where(mask) - - def get_box_grids_below_level(self, left_edge, right_edge, level, min_level=0): - # We discard grids if they are ABOVE the level - mask = np.empty(self.grids.size, dtype="int32") - get_box_grids_below_level( - left_edge, - right_edge, - level, - self.grid_left_edge, - self.grid_right_edge, - self.grid_levels.astype("int32"), - 
mask, - min_level, - ) - mask = mask.astype("bool") - return self.grids[mask], np.where(mask) - - def get_periodic_box_grids_below_level( - self, left_edge, right_edge, level, min_level=0 - ): - mask = np.zeros(self.grids.shape, dtype="bool") - dl = self.dataset.domain_left_edge - dr = self.dataset.domain_right_edge - left_edge = np.array(left_edge) - right_edge = np.array(right_edge) - dw = dr - dl - left_dist = left_edge - dl - db = right_edge - left_edge - for off_x in [-1, 0, 1]: - nle = left_edge.copy() - nle[0] = (dw[0] * off_x + dl[0]) + left_dist[0] - for off_y in [-1, 0, 1]: - nle[1] = (dw[1] * off_y + dl[1]) + left_dist[1] - for off_z in [-1, 0, 1]: - nle[2] = (dw[2] * off_z + dl[2]) + left_dist[2] - nre = nle + db - g, gi = self.get_box_grids_below_level(nle, nre, level, min_level) - mask[gi] = True - return self.grids[mask], np.where(mask) - - def get_grid_tree(self): - - left_edge = np.zeros((self.num_grids, 3)) - right_edge = np.zeros((self.num_grids, 3)) - level = np.zeros((self.num_grids), dtype="int64") - parent_ind = np.zeros((self.num_grids), dtype="int64") - num_children = np.zeros((self.num_grids), dtype="int64") - dimensions = np.zeros((self.num_grids, 3), dtype="int32") - - for i, grid in enumerate(self.grids): - - left_edge[i, :] = grid.LeftEdge - right_edge[i, :] = grid.RightEdge - level[i] = grid.Level - if grid.Parent is None: - parent_ind[i] = -1 - else: - parent_ind[i] = grid.Parent.id - grid.Parent._id_offset - num_children[i] = np.int64(len(grid.Children)) - dimensions[i, :] = grid.ActiveDimensions - - return GridTree( - self.num_grids, - left_edge, - right_edge, - dimensions, - parent_ind, - level, - num_children, - ) From 5b0b7940230875027adc71bb0cc68eccf7056781 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 14 Jul 2020 10:16:18 +0200 Subject: [PATCH 315/653] fix: make Dataset.find_min and find_max methods two special cases of a general _find_extremum method to fix inconsistent behaviours --- yt/data_objects/static_output.py | 58 ++++++++++++++------------------ 1 file changed, 26 insertions(+), 32 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index fcb06885539..6f1a87a9b28 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -894,47 +894,41 @@ def _add_object_class(self, name, base): obj.__doc__ = base.__doc__ setattr(self, name, obj) - def find_max(self, field): + def _find_extremum(self, field, ext, source=None, to_array=True): + ext = ext.lower() + if source is None: + source = self.all_data() + method = { + "min": source.quantities.min_location, + "max": source.quantities.max_location, + }[ext] + val, x1, x2, x3 = method(field) + coords = [x1, x2, x3] + mylog.info("%s value is %0.5e at %0.16f %0.16f %0.16f", ext, val, *coords) + if to_array: + # This is a hack to fix the fact that some non-cartesian datasets have + # dimensionless quantities, and we can't yet handle that. + alt_coords = [] + for x in coords: + alt_coords.append( + self.quan(x.v, "code_length") if x.units.is_dimensionless else x + ) + coords = self.arr(alt_coords, dtype="float64").to("code_length") + return val, coords + + def find_max(self, field, source=None, to_array=True): """ Returns (value, location) of the maximum of a given field. 
""" mylog.debug("Searching for maximum value of %s", field) - source = self.all_data() - max_val, mx, my, mz = source.quantities.max_location(field) - # This is a hack to fix the fact that some non-cartesian datasets have - # dimensionless quantities, and we can't yet handle that. - if mx.units.is_dimensionless: - mx = self.quan(mx.v, "code_length") - if my.units.is_dimensionless: - my = self.quan(my.v, "code_length") - if mz.units.is_dimensionless: - mz = self.quan(mz.v, "code_length") - center = self.arr([mx, my, mz], dtype="float64").to("code_length") - mylog.info( - "Max Value is %0.5e at %0.16f %0.16f %0.16f", - max_val, - center[0], - center[1], - center[2], - ) - return max_val, center + return self._find_extremum(field, "max", source=source, to_array=to_array) - def find_min(self, field): + def find_min(self, field, source=None, to_array=True): """ Returns (value, location) for the minimum of a given field. """ mylog.debug("Searching for minimum value of %s", field) - source = self.all_data() - min_val, mx, my, mz = source.quantities.min_location(field) - center = self.arr([mx, my, mz], dtype="float64").to("code_length") - mylog.info( - "Min Value is %0.5e at %0.16f %0.16f %0.16f", - min_val, - center[0], - center[1], - center[2], - ) - return min_val, center + return self._find_extremum(field, "min", source=source, to_array=to_array) def find_field_values_at_point(self, fields, coords): """ From 9de2c661f2df681b218efaa07ef5f198fc297191 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 26 Jul 2020 09:30:06 +0200 Subject: [PATCH 316/653] refactor: throw an error in case of dimensionless coords. Add/improves docstrings. --- yt/data_objects/static_output.py | 42 ++++++++++++++++++++++++++------ 1 file changed, 35 insertions(+), 7 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 6f1a87a9b28..e5a4638a653 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -895,6 +895,32 @@ def _add_object_class(self, name, base): setattr(self, name, obj) def _find_extremum(self, field, ext, source=None, to_array=True): + """ + Find the extremum value of a field in a data object (source) and its position. + + Parameters + ---------- + field: str or tuple(str, str) + + ext: str + 'min' or 'max', select an extremum + + source: a Yt data object + + to_array: bool + select the return type. + + Returns + ------- + val, coords + + val: unyt.unyt_quantity + extremum value detected + + coords: unyt.unyt_array or list(unyt.unyt_quantity) + Conversion to a single unyt_array object is only possible for coordinate + systems with homogeneous dimensions across axes (i.e. cartesian). + """ ext = ext.lower() if source is None: source = self.all_data() @@ -906,19 +932,19 @@ def _find_extremum(self, field, ext, source=None, to_array=True): coords = [x1, x2, x3] mylog.info("%s value is %0.5e at %0.16f %0.16f %0.16f", ext, val, *coords) if to_array: - # This is a hack to fix the fact that some non-cartesian datasets have - # dimensionless quantities, and we can't yet handle that. - alt_coords = [] - for x in coords: - alt_coords.append( - self.quan(x.v, "code_length") if x.units.is_dimensionless else x + if any(x.units.is_dimensionless for x in coords): + raise ValueError( + "Can not convert dimensionless coordinates to spatial." 
+ "For non cartesian geometries, please use 'to_array=False'" ) - coords = self.arr(alt_coords, dtype="float64").to("code_length") + coords = self.arr(coords, dtype="float64").to("code_length") return val, coords def find_max(self, field, source=None, to_array=True): """ Returns (value, location) of the maximum of a given field. + + This is a wrapper around _find_extremum """ mylog.debug("Searching for maximum value of %s", field) return self._find_extremum(field, "max", source=source, to_array=to_array) @@ -926,6 +952,8 @@ def find_max(self, field, source=None, to_array=True): def find_min(self, field, source=None, to_array=True): """ Returns (value, location) for the minimum of a given field. + + This is a wrapper around _find_extremum """ mylog.debug("Searching for minimum value of %s", field) return self._find_extremum(field, "min", source=source, to_array=to_array) From ca99e3e993cb58452e50d426ddc12c49e373df8c Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 6 Aug 2020 13:57:17 -0500 Subject: [PATCH 317/653] Remove metaclass, replace with __init_subclass__ --- yt/data_objects/data_containers.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index ca6ba6dadd9..34a978182b0 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -77,15 +77,7 @@ def _get_ipython_key_completion(ds): return tuple_keys + fnames -class RegisteredDataContainer(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - if hasattr(cls, "_type_name") and not cls._skip_add: - name = getattr(cls, "_override_selector_name", cls._type_name) - data_object_registry[name] = cls - - -class YTDataContainer(metaclass=RegisteredDataContainer): +class YTDataContainer: """ Generic YTDataContainer container. By itself, will attempt to generate field, read fields (method defined by derived classes) @@ -141,6 +133,12 @@ def __init__(self, ds, field_parameters): for key, val in field_parameters.items(): self.set_field_parameter(key, val) + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + if hasattr(cls, "_type_name") and not cls._skip_add: + name = getattr(cls, "_override_selector_name", cls._type_name) + data_object_registry[name] = cls + @property def pf(self): return getattr(self, "ds", None) From 7700b1bdc464f04846737594e8e6617bd326da90 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 6 Aug 2020 14:14:01 -0500 Subject: [PATCH 318/653] Remove metaclasses that only registered objects. 
--- yt/data_objects/analyzer_objects.py | 7 ++---- yt/data_objects/derived_quantities.py | 14 +++++------ yt/data_objects/static_output.py | 14 +++++------ yt/data_objects/time_series.py | 20 ++++++++-------- yt/frontends/exodus_ii/simulation_handling.py | 4 ++-- yt/frontends/ramses/field_handlers.py | 23 ++++++++----------- yt/frontends/ramses/particle_handlers.py | 23 ++++++++----------- yt/utilities/flagging_methods.py | 9 +++++--- yt/utilities/io_handler.py | 22 ++++++++---------- yt/visualization/fixed_resolution_filters.py | 12 ++++------ yt/visualization/plot_modifications.py | 14 +++++------ .../volume_rendering/shader_objects.py | 14 +++++------ 12 files changed, 77 insertions(+), 99 deletions(-) diff --git a/yt/data_objects/analyzer_objects.py b/yt/data_objects/analyzer_objects.py index 78663dfb820..ab0b5c7c7fc 100644 --- a/yt/data_objects/analyzer_objects.py +++ b/yt/data_objects/analyzer_objects.py @@ -3,15 +3,12 @@ analysis_task_registry = {} -class RegisteredTask(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) +class AnalysisTask: + def __init_subclass__(cls, *args, **kwargs): if hasattr(cls, "skip") and not cls.skip: return analysis_task_registry[cls.__name__] = cls - -class AnalysisTask(metaclass=RegisteredTask): def __init__(self, *args, **kwargs): # This should only get called if the subclassed object # does not override diff --git a/yt/data_objects/derived_quantities.py b/yt/data_objects/derived_quantities.py index cfaeb9a0683..1f62a415e4c 100644 --- a/yt/data_objects/derived_quantities.py +++ b/yt/data_objects/derived_quantities.py @@ -29,19 +29,17 @@ def get_position_fields(field, data): return position_fields -class RegisteredDerivedQuantity(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - if name != "DerivedQuantity": - derived_quantity_registry[name] = cls - - -class DerivedQuantity(ParallelAnalysisInterface, metaclass=RegisteredDerivedQuantity): +class DerivedQuantity(ParallelAnalysisInterface): num_vals = -1 def __init__(self, data_source): self.data_source = data_source + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + if cls.__name__ != "DerivedQuantity": + derived_quantity_registry[cls.__name__] = cls + def count_values(self, *args, **kwargs): return diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index fcb06885539..3802d20364f 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -74,13 +74,6 @@ def _raise_unsupp(*args, **kwargs): return _raise_unsupp -class RegisteredDataset(abc.ABCMeta): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - output_type_registry[name] = cls - mylog.debug("Registering: %s as %s", name, cls) - - class IndexProxy: # This is a simple proxy for Index objects. 
It enables backwards # compatibility so that operations like .h.sphere, .h.print_stats and @@ -143,7 +136,7 @@ def ireq(self, value): return ireq -class Dataset(metaclass=RegisteredDataset): +class Dataset: default_fluid_type = "gas" default_field = ("gas", "density") @@ -198,6 +191,11 @@ def __new__(cls, filename=None, *args, **kwargs): obj = _cached_datasets[cache_key] return obj + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + output_type_registry[cls.__name__] = cls + mylog.debug("Registering: %s as %s", cls.__name__, cls) + def __init__( self, filename, diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 6660b175cd6..f948a9e2f99 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -510,6 +510,15 @@ def __init__(self, time_series, data_object_name, *args, **kwargs): ) self.quantities = TimeSeriesQuantitiesContainer(self, qs) + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + if "Simulation" not in cls.__name__: + return + code_name = cls.__name__[: cls.__name__.find("Simulation")] + if code_name: + simulation_time_series_registry[code_name] = cls + mylog.debug("Registering simulation: %s as %s", code_name, cls) + def eval(self, tasks): return self.time_series.eval(tasks, self) @@ -520,16 +529,7 @@ def get(self, ds): return cls(*self._args, **self._kwargs) -class RegisteredSimulationTimeSeries(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - code_name = name[: name.find("Simulation")] - if code_name: - simulation_time_series_registry[code_name] = cls - mylog.debug("Registering simulation: %s as %s", code_name, cls) - - -class SimulationTimeSeries(DatasetSeries, metaclass=RegisteredSimulationTimeSeries): +class SimulationTimeSeries(DatasetSeries): def __init__(self, parameter_filename, find_outputs=False): """ Base class for generating simulation time series types. diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index a029e987c66..bfcee25759b 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -1,14 +1,14 @@ import glob from yt.convenience import load -from yt.data_objects.time_series import DatasetSeries, RegisteredSimulationTimeSeries +from yt.data_objects.time_series import DatasetSeries from yt.funcs import only_on_root from yt.utilities.exceptions import YTOutputNotIdentified from yt.utilities.logger import ytLogger as mylog from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects -class ExodusIISimulation(DatasetSeries, metaclass=RegisteredSimulationTimeSeries): +class ExodusIISimulation(DatasetSeries): r""" Initialize an ExodusII Simulation object. diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index a2bc835aee0..538d6945112 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -23,20 +23,7 @@ def register_field_handler(ph): DETECTED_FIELDS = {} -class RAMSESFieldFileHandlerRegistry(type): - """ - This is a base class that on instantiation registers the file - handler into the list. Used as a metaclass. 
- """ - - def __new__(meta, name, bases, class_dict): - cls = type.__new__(meta, name, bases, class_dict) - if cls.ftype is not None: - register_field_handler(cls) - return cls - - -class FieldFileHandler(metaclass=RAMSESFieldFileHandlerRegistry): +class FieldFileHandler: """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -61,6 +48,14 @@ class FieldFileHandler(metaclass=RAMSESFieldFileHandlerRegistry): None # Mapping from field to the type of the data (float, integer, ...) ) + def __init_subclass__(cls, *args, **kwargs): + """ + This ensures that when a new subclass is created, it's registered. + """ + super().__init_subclass__(*args, **kwargs) + if cls.ftype is not None: + register_field_handler(cls) + def __init__(self, domain): """ Initalize an instance of the class. This automatically sets diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index bb19f490643..062bfc15721 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -18,20 +18,7 @@ def register_particle_handler(ph): PARTICLE_HANDLERS.add(ph) -class RAMSESParticleFileHandlerRegistry(type): - """ - This is a base class that on instantiation registers the file - handler into the list. Used as a metaclass. - """ - - def __new__(meta, name, bases, class_dict): - cls = type.__new__(meta, name, bases, class_dict) - if cls.ptype is not None: - register_particle_handler(cls) - return cls - - -class ParticleFileHandler(metaclass=RAMSESParticleFileHandlerRegistry): +class ParticleFileHandler: """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -57,6 +44,14 @@ class ParticleFileHandler(metaclass=RAMSESParticleFileHandlerRegistry): ) local_particle_count = None # The number of particle in the domain + def __init_subclass__(cls, *args, **kwargs): + """ + This ensures that when a new subclass is created, it's registered. + """ + super().__init_subclass__(*args, **kwargs) + if cls.ptype is not None: + register_particle_handler(cls) + def __init__(self, ds, domain): """ Initalize an instance of the class. 
This automatically sets diff --git a/yt/utilities/flagging_methods.py b/yt/utilities/flagging_methods.py index 01743a5a72e..04490da9a2a 100644 --- a/yt/utilities/flagging_methods.py +++ b/yt/utilities/flagging_methods.py @@ -8,13 +8,16 @@ class RegisteredFlaggingMethod(type): def __init__(cls, name, b, d): type.__init__(cls, name, b, d) - if hasattr(cls, "_type_name") and not cls._skip_add: - flagging_method_registry[cls._type_name] = cls -class FlaggingMethod(metaclass=RegisteredFlaggingMethod): +class FlaggingMethod: _skip_add = False + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + if hasattr(cls, "_type_name") and not cls._skip_add: + flagging_method_registry[cls._type_name] = cls + class OverDensity(FlaggingMethod): _type_name = "overdensity" diff --git a/yt/utilities/io_handler.py b/yt/utilities/io_handler.py index 67b0c86f7e4..d74f1370e17 100644 --- a/yt/utilities/io_handler.py +++ b/yt/utilities/io_handler.py @@ -19,18 +19,7 @@ def _make_io_key(args, *_args, **kwargs): return _make_key((obj.id, field), *_args, **kwargs) -class RegisteredIOHandler(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - if hasattr(cls, "_dataset_type"): - io_registry[cls._dataset_type] = cls - if use_caching and hasattr(cls, "_read_obj_field"): - cls._read_obj_field = lru_cache( - maxsize=use_caching, typed=True, make_key=_make_io_key - )(cls._read_obj_field) - - -class BaseIOHandler(metaclass=RegisteredIOHandler): +class BaseIOHandler: _vector_fields = () _dataset_type = None _particle_reader = False @@ -38,6 +27,15 @@ class BaseIOHandler(metaclass=RegisteredIOHandler): _misses = 0 _hits = 0 + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + if hasattr(cls, "_dataset_type"): + io_registry[cls._dataset_type] = cls + if use_caching and hasattr(cls, "_read_obj_field"): + cls._read_obj_field = lru_cache( + maxsize=use_caching, typed=True, make_key=_make_io_key + )(cls._read_obj_field) + def __init__(self, ds): self.queue = defaultdict(dict) self.ds = ds diff --git a/yt/visualization/fixed_resolution_filters.py b/yt/visualization/fixed_resolution_filters.py index e7b810deede..d69dc8fafdd 100644 --- a/yt/visualization/fixed_resolution_filters.py +++ b/yt/visualization/fixed_resolution_filters.py @@ -14,19 +14,17 @@ def newfunc(*args, **kwargs): return newfunc -class RegisteredFilter(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - filter_registry[name] = cls - - -class FixedResolutionBufferFilter(metaclass=RegisteredFilter): +class FixedResolutionBufferFilter: """ This object allows to apply data transformation directly to :class:`yt.visualization.fixed_resolution.FixedResolutionBuffer` """ + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + filter_registry[cls.__name__] = cls + def __init__(self, *args, **kwargs): pass diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index 5f79e4abbce..d57f423edb8 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -47,14 +47,7 @@ def _check_geometry(self, plot): return _check_geometry -class RegisteredCallback(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - callback_registry[name] = cls - cls.__call__ = _verify_geometry(cls.__call__) - - -class PlotCallback(metaclass=RegisteredCallback): +class PlotCallback: # _supported_geometries is set by subclasses of PlotCallback to a tuple of 
# strings corresponding to the names of the geometries that a callback # supports. By default it is None, which means it supports everything. @@ -64,6 +57,11 @@ class PlotCallback(metaclass=RegisteredCallback): # and will only look at the geometries. _supported_geometries = None + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + callback_registry[cls.__name__] = cls + cls.__call__ = _verify_geometry(cls.__call__) + def __init__(self, *args, **kwargs): pass diff --git a/yt/visualization/volume_rendering/shader_objects.py b/yt/visualization/volume_rendering/shader_objects.py index eba6c4a4adb..abad5cde440 100644 --- a/yt/visualization/volume_rendering/shader_objects.py +++ b/yt/visualization/volume_rendering/shader_objects.py @@ -133,14 +133,7 @@ def disable_vert_attrib(self, name): GL.glDisableVertexAttribArray(loc) -class RegisteredShader(type): - def __init__(cls, name, b, d): - type.__init__(cls, name, b, d) - if getattr(cls, "_shader_name", None) is not None: - known_shaders[cls._shader_name] = cls - - -class Shader(metaclass=RegisteredShader): +class Shader: """ Creates a shader from source @@ -158,6 +151,11 @@ class Shader(metaclass=RegisteredShader): _source = None _shader_name = None + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + if getattr(cls, "_shader_name", None) is not None: + known_shaders[cls._shader_name] = cls + def __init__(self, source=None): if source: self.compile(source) From 5206a0a8975076198bbeeba653a8c1a1269da437 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 6 Aug 2020 20:26:18 +0100 Subject: [PATCH 319/653] Fix function args. --- yt/frontends/adaptahop/data_structures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 63fde4b8402..27b205ec046 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -32,7 +32,7 @@ def _setup_filenames(self): cls = self.dataset._file_class if ndoms > 1: self.data_files = [ - cls(self.dataset, self.io, template % {"num": i}, i, range=None) + cls(self.dataset, self.io, template % {"num": i}, i, None) for i in range(ndoms) ] else: @@ -42,7 +42,7 @@ def _setup_filenames(self): self.io, self.dataset.parameter_filename, 0, - range=None, + None, ) ] self.total_particles = sum( From 17c8fe209e6fa824992ee46bde21150f2e7a386f Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 6 Aug 2020 20:29:46 +0100 Subject: [PATCH 320/653] Typo. --- yt/frontends/gadget_fof/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index b147a0de28b..1cd2064e5b3 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -490,7 +490,7 @@ def __repr__(self): def _setup_classes(self): self.objects = [] - def _set_code_units_attributes(self): + def _set_code_unit_attributes(self): pass def _is_valid(self, *args, **kwargs): From c971fa426c8b83db18e24ec1f6f40bd4f26c7296 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 6 Aug 2020 20:32:16 +0100 Subject: [PATCH 321/653] Add decorator. 
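_is_valid is probed on the dataset classes themselves, not on instances, when
yt tries to identify an output type, so it is expected to be a classmethod
like in the other Dataset subclasses. A minimal sketch of the difference, with
hypothetical class names and a hypothetical filename:

    class WithoutDecorator:
        def _is_valid(self, *args, **kwargs):
            return False

    class WithDecorator:
        @classmethod
        def _is_valid(cls, *args, **kwargs):
            return False

    fn = "halo_catalog.0.h5"
    WithDecorator._is_valid(fn)     # cls is bound automatically; fn lands in args
    WithoutDecorator._is_valid(fn)  # fn is silently consumed as self

Both calls happen to run because of the *args signature, but only the
decorated form treats the filename as an argument rather than as self.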
--- yt/frontends/gadget_fof/data_structures.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 1cd2064e5b3..bfcb55ce6e3 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -493,6 +493,7 @@ def _setup_classes(self): def _set_code_unit_attributes(self): pass + @classmethod def _is_valid(self, *args, **kwargs): return False From 028f8e30ce3fdf6eedfed6996ca11e63c710ca37 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 6 Aug 2020 20:37:24 +0100 Subject: [PATCH 322/653] Return removed functions. --- yt/frontends/gadget_fof/data_structures.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index bfcb55ce6e3..211ddb2994e 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -433,6 +433,10 @@ def _get_halo_values(self, ptype, identifiers, fields, f=None): return data + def _setup_data_io(self): + super(GadgetFOFHaloParticleIndex, self)._setup_data_io() + self._create_halo_id_table() + class GadgetFOFHaloDataset(ParticleDataset): _index_class = GadgetFOFHaloParticleIndex @@ -479,9 +483,7 @@ def _parse_parameter_file(self): setattr(self, attr, getattr(self.real_ds, attr)) def set_code_units(self): - for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: - my_unit = "%s_unit" % unit - setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) + self._set_code_unit_attributes() self.unit_registry = self.real_ds.unit_registry def __repr__(self): @@ -491,7 +493,9 @@ def _setup_classes(self): self.objects = [] def _set_code_unit_attributes(self): - pass + for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: + my_unit = "%s_unit" % unit + setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) @classmethod def _is_valid(self, *args, **kwargs): From b3b911f9face92fc46c797f02d7e94b1f320aaf6 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 6 Aug 2020 15:07:46 -0500 Subject: [PATCH 323/653] Update yt/frontends/ramses/field_handlers.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/frontends/ramses/field_handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 538d6945112..35f768f1e94 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -50,7 +50,7 @@ class FieldFileHandler: def __init_subclass__(cls, *args, **kwargs): """ - This ensures that when a new subclass is created, it's registered. + Registers subclasses at creation. 
""" super().__init_subclass__(*args, **kwargs) if cls.ftype is not None: From 1b9c36779c02e81ad471193ff0a715116d2e2895 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 6 Aug 2020 15:08:15 -0500 Subject: [PATCH 324/653] Update yt/frontends/ramses/particle_handlers.py --- yt/frontends/ramses/particle_handlers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 062bfc15721..1e69e7d4591 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -46,7 +46,7 @@ class ParticleFileHandler: def __init_subclass__(cls, *args, **kwargs): """ - This ensures that when a new subclass is created, it's registered. + Registers a subclass at creation. """ super().__init_subclass__(*args, **kwargs) if cls.ptype is not None: From c5736e3675f186e4804d83ff96c622ad71b39c09 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 17:04:27 +0200 Subject: [PATCH 325/653] replace error with logging entry --- yt/data_objects/static_output.py | 17 +++++++++++++---- 1 file changed, 13 insertions(+), 4 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index e5a4638a653..cf1ca8ab6bb 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -933,11 +933,20 @@ def _find_extremum(self, field, ext, source=None, to_array=True): mylog.info("%s value is %0.5e at %0.16f %0.16f %0.16f", ext, val, *coords) if to_array: if any(x.units.is_dimensionless for x in coords): - raise ValueError( - "Can not convert dimensionless coordinates to spatial." - "For non cartesian geometries, please use 'to_array=False'" + mylog.warning( + f"dataset {self} has angular coordinates. " + "Use 'to_array=False' to preserve " + "dimensionality in each coordinate." 
+ ) + + # force conversion to length + alt_coords = [] + for x in coords: + alt_coords.append( + self.quan(x.v, "code_length") if x.units.is_dimensionless else x ) - coords = self.arr(coords, dtype="float64").to("code_length") + + coords = self.arr(alt_coords, dtype="float64").to("code_length") return val, coords def find_max(self, field, source=None, to_array=True): From 4523fc32767d9a8f4f7d56a17e69082414fc0598 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 6 Aug 2020 21:22:57 -0500 Subject: [PATCH 326/653] Move global object registries into their own file --- yt/convenience.py | 2 +- yt/data_objects/analyzer_objects.py | 2 +- yt/data_objects/api.py | 1 - yt/data_objects/data_containers.py | 3 +-- yt/data_objects/derived_quantities.py | 3 +-- yt/data_objects/static_output.py | 8 ++------ yt/data_objects/time_series.py | 15 +++++++-------- yt/tests/test_load_errors.py | 2 +- yt/utilities/command_line.py | 2 +- yt/utilities/object_registries.py | 7 +++++++ yt/utilities/parameter_file_storage.py | 3 +-- yt/visualization/volume_rendering/old_camera.py | 2 +- 12 files changed, 24 insertions(+), 26 deletions(-) create mode 100644 yt/utilities/object_registries.py diff --git a/yt/convenience.py b/yt/convenience.py index 2b1516f35a1..13902ac84dc 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -8,7 +8,7 @@ YTSimulationNotIdentified, ) from yt.utilities.hierarchy_inspection import find_lowest_subclasses -from yt.utilities.parameter_file_storage import ( +from yt.utilities.object_registries import ( output_type_registry, simulation_time_series_registry, ) diff --git a/yt/data_objects/analyzer_objects.py b/yt/data_objects/analyzer_objects.py index ab0b5c7c7fc..4bf3337f07a 100644 --- a/yt/data_objects/analyzer_objects.py +++ b/yt/data_objects/analyzer_objects.py @@ -1,6 +1,6 @@ import inspect -analysis_task_registry = {} +from yt.utilities.object_registries import analysis_task_registry class AnalysisTask: diff --git a/yt/data_objects/api.py b/yt/data_objects/api.py index 4dca4a02880..f0f94d5ad01 100644 --- a/yt/data_objects/api.py +++ b/yt/data_objects/api.py @@ -1,6 +1,5 @@ from . 
import construction_data_containers as __cdc, selection_data_containers as __sdc from .analyzer_objects import AnalysisTask, analysis_task -from .data_containers import data_object_registry from .grid_patch import AMRGridPatch from .image_array import ImageArray from .octree_subset import OctreeSubset diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 34a978182b0..ff661ae035f 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -42,6 +42,7 @@ YTSpatialFieldUnitError, ) from yt.utilities.lib.marching_cubes import march_cubes_grid, march_cubes_grid_flux +from yt.utilities.object_registries import data_object_registry from yt.utilities.parallel_tools.parallel_analysis_interface import ( ParallelAnalysisInterface, ) @@ -49,8 +50,6 @@ from .derived_quantities import DerivedQuantityCollection -data_object_registry = {} - def sanitize_weight_field(ds, field, weight): field_object = ds._get_field_info(field) diff --git a/yt/data_objects/derived_quantities.py b/yt/data_objects/derived_quantities.py index 1f62a415e4c..27738737e58 100644 --- a/yt/data_objects/derived_quantities.py +++ b/yt/data_objects/derived_quantities.py @@ -3,6 +3,7 @@ from yt.funcs import camelcase_to_underscore, ensure_list from yt.units.yt_array import array_like_field from yt.utilities.exceptions import YTParticleTypeNotFound +from yt.utilities.object_registries import derived_quantity_registry from yt.utilities.parallel_tools.parallel_analysis_interface import ( ParallelAnalysisInterface, parallel_objects, @@ -10,8 +11,6 @@ from yt.utilities.physical_constants import gravitational_constant_cgs from yt.utilities.physical_ratios import HUGE -derived_quantity_registry = {} - def get_position_fields(field, data): axis_names = [data.ds.coordinates.axis_name[num] for num in [0, 1, 2]] diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 3802d20364f..aaeeb8f9900 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -12,7 +12,6 @@ import numpy as np from yt.config import ytcfg -from yt.data_objects.data_containers import data_object_registry from yt.data_objects.particle_filters import filter_registry from yt.data_objects.particle_unions import ParticleUnion from yt.data_objects.region_expression import RegionExpression @@ -52,12 +51,9 @@ YTObjectNotImplemented, ) from yt.utilities.minimal_representation import MinimalDataset +from yt.utilities.object_registries import data_object_registry, output_type_registry from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only -from yt.utilities.parameter_file_storage import ( - NoParameterShelf, - ParameterFileStore, - output_type_registry, -) +from yt.utilities.parameter_file_storage import NoParameterShelf, ParameterFileStore # We want to support the movie format in the future. 
# When such a thing comes to pass, I'll move all the stuff that is constant up diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index f948a9e2f99..91425c9078b 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -9,23 +9,22 @@ from yt.config import ytcfg from yt.convenience import load -from yt.data_objects.analyzer_objects import ( - AnalysisTask, - analysis_task_registry, - create_quantity_proxy, -) -from yt.data_objects.data_containers import data_object_registry -from yt.data_objects.derived_quantities import derived_quantity_registry +from yt.data_objects.analyzer_objects import AnalysisTask, create_quantity_proxy from yt.data_objects.particle_trajectories import ParticleTrajectories from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog from yt.units.yt_array import YTArray, YTQuantity from yt.utilities.exceptions import YTException, YTOutputNotIdentified +from yt.utilities.object_registries import ( + analysis_task_registry, + data_object_registry, + derived_quantity_registry, + simulation_time_series_registry, +) from yt.utilities.parallel_tools.parallel_analysis_interface import ( communication_system, parallel_objects, parallel_root_only, ) -from yt.utilities.parameter_file_storage import simulation_time_series_registry class AnalysisTaskProxy: diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index 7990711e27e..68c7da29ceb 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -10,7 +10,7 @@ YTOutputNotIdentified, YTSimulationNotIdentified, ) -from yt.utilities.parameter_file_storage import output_type_registry +from yt.utilities.object_registries import output_type_registry def test_load_nonexistent_data(): diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 27fcb2e8079..e20fa67332d 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -1719,7 +1719,7 @@ class YTSearchCmd(YTCommand): name = "search" def __call__(self, args): - from yt.utilities.parameter_file_storage import output_type_registry + from yt.utilities.object_registries import output_type_registry candidates = [] for base, dirs, files in os.walk(".", followlinks=True): diff --git a/yt/utilities/object_registries.py b/yt/utilities/object_registries.py new file mode 100644 index 00000000000..8c7018501e6 --- /dev/null +++ b/yt/utilities/object_registries.py @@ -0,0 +1,7 @@ +# These are some of the data object registries that are used in different places in the code. Not all of the self-registering objects are included in these. 
+ +analysis_task_registry = {} +data_object_registry = {} +derived_quantity_registry = {} +output_type_registry = {} +simulation_time_series_registry = {} diff --git a/yt/utilities/parameter_file_storage.py b/yt/utilities/parameter_file_storage.py index db8a6f7e41a..5c6191feb98 100644 --- a/yt/utilities/parameter_file_storage.py +++ b/yt/utilities/parameter_file_storage.py @@ -4,12 +4,11 @@ from yt.config import ytcfg from yt.funcs import mylog +from yt.utilities.object_registries import output_type_registry from yt.utilities.parallel_tools.parallel_analysis_interface import ( parallel_simple_proxy, ) -output_type_registry = {} -simulation_time_series_registry = {} _field_names = ("hash", "bn", "fp", "tt", "ctid", "class_name", "last_seen") diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 3136bb69daf..5d948630fe3 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -5,7 +5,6 @@ from yt.config import ytcfg from yt.data_objects.api import ImageArray -from yt.data_objects.data_containers import data_object_registry from yt.funcs import ensure_numpy_array, get_num_threads, get_pbar, iterable, mylog from yt.units.yt_array import YTArray from yt.utilities.amr_kdtree.api import AMRKDTree @@ -24,6 +23,7 @@ from yt.utilities.lib.misc_utilities import lines from yt.utilities.lib.partitioned_grid import PartitionedGrid from yt.utilities.math_utils import get_rotation_matrix +from yt.utilities.object_registries import data_object_registry from yt.utilities.orientation import Orientation from yt.utilities.parallel_tools.parallel_analysis_interface import ( ParallelAnalysisInterface, From 604f03970552be4270a47e482cad8ae3c8e5aead Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 6 Aug 2020 21:25:50 -0500 Subject: [PATCH 327/653] Fix Simulation Series registry initialization --- yt/data_objects/time_series.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 91425c9078b..db82bc0d63a 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -155,6 +155,13 @@ class DatasetSeries: """ + def __init_subclass__(cls, *args, **kwargs): + super().__init_subclass__(*args, **kwargs) + code_name = cls.__name__[: cls.__name__.find("Simulation")] + if code_name: + simulation_time_series_registry[code_name] = cls + mylog.debug("Registering simulation: %s as %s", code_name, cls) + def __new__(cls, outputs, *args, **kwargs): if isinstance(outputs, str): outputs = get_filenames_from_glob_pattern(outputs) @@ -509,15 +516,6 @@ def __init__(self, time_series, data_object_name, *args, **kwargs): ) self.quantities = TimeSeriesQuantitiesContainer(self, qs) - def __init_subclass__(cls, *args, **kwargs): - super().__init_subclass__(*args, **kwargs) - if "Simulation" not in cls.__name__: - return - code_name = cls.__name__[: cls.__name__.find("Simulation")] - if code_name: - simulation_time_series_registry[code_name] = cls - mylog.debug("Registering simulation: %s as %s", code_name, cls) - def eval(self, tasks): return self.time_series.eval(tasks, self) From 190b1ee4673d86b801b721215ae0d772c926c105 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 7 Aug 2020 07:22:31 +0100 Subject: [PATCH 328/653] run black again. 
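This commit is formatting only; there is no functional change. The black
version in use at the time still collapses a call onto one line even when it
ends with a trailing comma, so the hand-wrapped call from the earlier commit
becomes

    cls(self.dataset, self.io, self.dataset.parameter_filename, 0, None,)

Later black releases honour the trailing "magic" comma and may keep such a
call exploded, so this particular formatting is tied to the black version
used here.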
--- yt/frontends/adaptahop/data_structures.py | 8 +------- 1 file changed, 1 insertion(+), 7 deletions(-) diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 27b205ec046..aea8bb14dbb 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -37,13 +37,7 @@ def _setup_filenames(self): ] else: self.data_files = [ - cls( - self.dataset, - self.io, - self.dataset.parameter_filename, - 0, - None, - ) + cls(self.dataset, self.io, self.dataset.parameter_filename, 0, None,) ] self.total_particles = sum( sum(d.total_particles.values()) for d in self.data_files From e00ac5bcdfb546ec58a5abc621435bc2af132e64 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 7 Aug 2020 09:23:23 +0200 Subject: [PATCH 329/653] Remove trailing whitespace --- yt/data_objects/static_output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index c002c1a037b..2c75c39061a 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1385,7 +1385,7 @@ def add_field(self, name, function, sampling_type, **kwargs): # Handle the case where the field has already been added. if not override and name in self.field_info: mylog.warning( - "Field %s already exists. To override use `force_override=True`.", name, + "Field %s already exists. To override use `force_override=True`.", name, ) self.field_info.add_field(name, function, sampling_type, **kwargs) From f7445f02022293f1b089cd8907000301516354bf Mon Sep 17 00:00:00 2001 From: convert-repo Date: Thu, 6 Aug 2020 16:14:12 +0200 Subject: [PATCH 330/653] Applying flynt bluntly. --- yt/config.py | 2 +- yt/data_objects/analyzer_objects.py | 4 +- .../construction_data_containers.py | 38 ++-- yt/data_objects/data_containers.py | 36 ++-- yt/data_objects/derived_quantities.py | 12 +- yt/data_objects/grid_patch.py | 2 +- yt/data_objects/level_sets/clump_handling.py | 16 +- yt/data_objects/octree_subset.py | 8 +- yt/data_objects/particle_trajectories.py | 8 +- yt/data_objects/profiles.py | 20 +- yt/data_objects/selection_data_containers.py | 6 +- yt/data_objects/static_output.py | 16 +- yt/data_objects/tests/test_covering_grid.py | 12 +- yt/data_objects/tests/test_fluxes.py | 8 +- yt/data_objects/tests/test_pickle.py | 4 +- yt/data_objects/tests/test_profiles.py | 2 +- yt/data_objects/tests/test_projection.py | 4 +- .../tests/test_sph_data_objects.py | 2 +- yt/data_objects/tests/test_time_series.py | 2 +- yt/data_objects/time_series.py | 2 +- yt/data_objects/unstructured_mesh.py | 2 +- yt/exthook.py | 2 +- yt/fields/derived_field.py | 10 +- yt/fields/field_exceptions.py | 10 +- yt/fields/field_functions.py | 4 +- yt/fields/field_info_container.py | 16 +- yt/fields/fluid_fields.py | 8 +- yt/fields/fluid_vector_fields.py | 32 ++- yt/fields/magnetic_field.py | 16 +- yt/fields/particle_fields.py | 46 ++--- yt/fields/species_fields.py | 70 +++---- yt/fields/tests/test_fields.py | 4 +- yt/fields/tests/test_fields_plugins.py | 2 +- yt/fields/tests/test_sph_fields.py | 4 +- yt/fields/tests/test_vector_fields.py | 8 +- yt/fields/vector_operations.py | 194 +++++++++--------- yt/fields/xray_emission_fields.py | 20 +- yt/frontends/adaptahop/data_structures.py | 2 +- yt/frontends/adaptahop/fields.py | 4 +- yt/frontends/adaptahop/io.py | 2 +- yt/frontends/ahf/data_structures.py | 2 +- yt/frontends/amrvac/fields.py | 14 +- yt/frontends/api.py | 2 +- yt/frontends/arepo/data_structures.py | 2 +- 
yt/frontends/arepo/fields.py | 2 +- yt/frontends/art/data_structures.py | 8 +- yt/frontends/art/fields.py | 6 +- yt/frontends/art/io.py | 10 +- yt/frontends/art/tests/test_outputs.py | 4 +- yt/frontends/artio/data_structures.py | 8 +- yt/frontends/artio/fields.py | 4 +- yt/frontends/athena/data_structures.py | 22 +- yt/frontends/athena/fields.py | 12 +- yt/frontends/athena/tests/test_outputs.py | 4 +- yt/frontends/athena_pp/data_structures.py | 4 +- yt/frontends/athena_pp/fields.py | 4 +- yt/frontends/athena_pp/io.py | 4 +- yt/frontends/boxlib/data_structures.py | 4 +- yt/frontends/boxlib/fields.py | 26 +-- yt/frontends/chombo/data_structures.py | 8 +- yt/frontends/chombo/fields.py | 16 +- yt/frontends/chombo/io.py | 2 +- yt/frontends/enzo/answer_testing_support.py | 2 +- yt/frontends/enzo/data_structures.py | 6 +- yt/frontends/enzo/fields.py | 8 +- yt/frontends/enzo/io.py | 2 +- yt/frontends/enzo/simulation_handling.py | 10 +- yt/frontends/enzo/tests/test_outputs.py | 8 +- yt/frontends/enzo_p/data_structures.py | 4 +- yt/frontends/enzo_p/io.py | 2 +- yt/frontends/enzo_p/tests/test_misc.py | 8 +- yt/frontends/exodus_ii/data_structures.py | 6 +- yt/frontends/exodus_ii/simulation_handling.py | 2 +- yt/frontends/fits/data_structures.py | 10 +- yt/frontends/fits/misc.py | 10 +- yt/frontends/flash/data_structures.py | 16 +- yt/frontends/flash/fields.py | 6 +- yt/frontends/flash/io.py | 12 +- yt/frontends/gadget/data_structures.py | 6 +- yt/frontends/gadget/io.py | 8 +- yt/frontends/gadget/simulation_handling.py | 6 +- yt/frontends/gadget_fof/data_structures.py | 17 +- yt/frontends/gadget_fof/io.py | 2 +- yt/frontends/gamer/data_structures.py | 2 +- yt/frontends/gamer/fields.py | 6 +- yt/frontends/gdf/data_structures.py | 4 +- yt/frontends/gdf/fields.py | 2 +- yt/frontends/gdf/io.py | 2 +- yt/frontends/gizmo/fields.py | 8 +- yt/frontends/gizmo/tests/test_outputs.py | 12 +- yt/frontends/halo_catalog/data_structures.py | 4 +- .../halo_catalog/tests/test_outputs.py | 6 +- yt/frontends/http_stream/io.py | 2 +- yt/frontends/moab/io.py | 2 +- yt/frontends/open_pmd/data_structures.py | 4 +- yt/frontends/open_pmd/fields.py | 12 +- yt/frontends/open_pmd/misc.py | 2 +- yt/frontends/owls/simulation_handling.py | 4 +- yt/frontends/owls_subfind/data_structures.py | 2 +- yt/frontends/ramses/data_structures.py | 10 +- yt/frontends/ramses/field_handlers.py | 11 +- yt/frontends/ramses/fields.py | 21 +- yt/frontends/ramses/io.py | 10 +- yt/frontends/ramses/particle_handlers.py | 7 +- yt/frontends/ramses/tests/test_outputs.py | 8 +- yt/frontends/rockstar/data_structures.py | 4 +- yt/frontends/sdf/data_structures.py | 6 +- yt/frontends/sph/data_structures.py | 2 +- yt/frontends/stream/data_structures.py | 6 +- yt/frontends/stream/fields.py | 2 +- yt/frontends/stream/io.py | 12 +- .../stream/tests/test_stream_particles.py | 12 +- yt/frontends/swift/io.py | 6 +- yt/frontends/tipsy/data_structures.py | 2 +- yt/frontends/tipsy/io.py | 4 +- yt/frontends/ytdata/data_structures.py | 28 ++- yt/frontends/ytdata/tests/test_outputs.py | 2 +- yt/frontends/ytdata/utilities.py | 2 +- yt/funcs.py | 47 +++-- .../coordinates/cartesian_coordinates.py | 8 +- yt/geometry/coordinates/coordinate_handler.py | 6 +- .../coordinates/geographic_coordinates.py | 6 +- .../coordinates/spec_cube_coordinates.py | 6 +- .../tests/test_cartesian_coordinates.py | 4 +- .../tests/test_cylindrical_coordinates.py | 6 +- .../tests/test_geographic_coordinates.py | 4 +- .../tests/test_polar_coordinates.py | 2 +- .../tests/test_spherical_coordinates.py | 2 
+- yt/geometry/geometry_handler.py | 14 +- yt/geometry/grid_geometry_handler.py | 6 +- yt/geometry/object_finding_mixin.py | 8 +- yt/geometry/oct_geometry_handler.py | 2 +- yt/geometry/tests/test_particle_octree.py | 42 ++-- yt/pmods.py | 6 +- yt/startup_tasks.py | 2 +- yt/testing.py | 26 ++- yt/utilities/amr_kdtree/amr_kdtree.py | 10 +- yt/utilities/answer_testing/answer_tests.py | 2 +- yt/utilities/answer_testing/framework.py | 28 +-- .../answer_testing/level_sets_tests.py | 2 +- yt/utilities/answer_testing/utils.py | 4 +- yt/utilities/command_line.py | 84 ++++---- yt/utilities/configure.py | 8 +- yt/utilities/cosmology.py | 2 +- yt/utilities/exceptions.py | 118 +++++------ yt/utilities/flagging_methods.py | 2 +- yt/utilities/fortran_utils.py | 6 +- .../conversion/conversion_athena.py | 2 +- yt/utilities/lib/cykdtree/tests/__init__.py | 20 +- yt/utilities/lib/cykdtree/tests/scaling.py | 16 +- .../lib/cykdtree/tests/test_kdtree.py | 4 +- yt/utilities/lib/tests/test_geometry_utils.py | 8 +- yt/utilities/load_sample.py | 6 +- yt/utilities/lodgeit.py | 16 +- yt/utilities/minimal_representation.py | 2 +- yt/utilities/on_demand_imports.py | 3 +- .../parallel_analysis_interface.py | 4 +- yt/utilities/parameter_file_storage.py | 10 +- yt/utilities/particle_generator.py | 2 +- yt/utilities/performance_counters.py | 4 +- yt/utilities/periodic_table.py | 2 +- yt/utilities/rpdb.py | 8 +- yt/utilities/sdf.py | 20 +- yt/utilities/tests/test_config.py | 2 +- yt/utilities/tests/test_cosmology.py | 10 +- yt/visualization/base_plot_types.py | 2 +- yt/visualization/color_maps.py | 2 +- yt/visualization/eps_writer.py | 14 +- yt/visualization/fits_image.py | 26 ++- yt/visualization/fixed_resolution.py | 2 +- yt/visualization/image_writer.py | 6 +- yt/visualization/mapserver/pannable_map.py | 20 +- yt/visualization/plot_container.py | 8 +- yt/visualization/plot_modifications.py | 32 +-- yt/visualization/plot_window.py | 16 +- yt/visualization/profile_plotter.py | 22 +- .../tests/test_geo_projections.py | 4 +- yt/visualization/tests/test_line_plots.py | 2 +- yt/visualization/tests/test_mesh_slices.py | 8 +- yt/visualization/tests/test_plotwindow.py | 2 +- yt/visualization/tests/test_profile_plots.py | 8 +- .../tests/test_raw_field_slices.py | 4 +- yt/visualization/volume_rendering/camera.py | 6 +- .../volume_rendering/image_handling.py | 8 +- .../volume_rendering/input_events.py | 8 +- .../interactive_vr_helpers.py | 3 +- yt/visualization/volume_rendering/lens.py | 8 +- .../volume_rendering/off_axis_projection.py | 2 +- .../volume_rendering/old_camera.py | 2 +- .../volume_rendering/render_source.py | 8 +- yt/visualization/volume_rendering/scene.py | 14 +- .../volume_rendering/shader_objects.py | 6 +- .../tests/test_camera_attributes.py | 6 +- .../volume_rendering/tests/test_lenses.py | 12 +- .../tests/test_mesh_render.py | 28 +-- .../volume_rendering/tests/test_scene.py | 2 +- .../tests/test_vr_orientation.py | 2 +- .../volume_rendering/transfer_functions.py | 4 +- .../volume_rendering/volume_rendering.py | 3 +- 199 files changed, 1000 insertions(+), 1110 deletions(-) diff --git a/yt/config.py b/yt/config.py index 09d459a611f..56789e1d4b2 100644 --- a/yt/config.py +++ b/yt/config.py @@ -90,7 +90,7 @@ # We changed them all to lowercase if option.lower() in ytcfg_defaults: new_cp.set("yt", option, cp.get(section, option)) - print("Setting %s to %s" % (option, cp.get(section, option))) + print(f"Setting {option} to {cp.get(section, option)}") open(_OLD_CONFIG_FILE + ".old", "w").write(f) 
new_cp.write(open(_OLD_CONFIG_FILE, "w")) diff --git a/yt/data_objects/analyzer_objects.py b/yt/data_objects/analyzer_objects.py index 78663dfb820..311f7d49002 100644 --- a/yt/data_objects/analyzer_objects.py +++ b/yt/data_objects/analyzer_objects.py @@ -22,7 +22,7 @@ def __init__(self, *args, **kwargs): def __repr__(self): # Stolen from YTDataContainer.__repr__ - s = "%s: " % (self.__class__.__name__) + s = f"{self.__class__.__name__}: " s += ", ".join(["%s=%s" % (i, getattr(self, i)) for i in self._params]) return s @@ -69,7 +69,7 @@ class QuantityProxy(AnalysisTask): def __repr__(self): # Stolen from YTDataContainer.__repr__ - s = "%s: " % (self.__class__.__name__) + s = f"{self.__class__.__name__}: " s += ", ".join(["%s" % [arg for arg in self.args]]) s += ", ".join(["%s=%s" % (k, v) for k, v in self.kwargs.items()]) return s diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 87996ef9d69..9818b3ccdbf 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -343,7 +343,7 @@ def _initialize_projected_units(self, fields, chunk): path_length_unit = Unit(registry=self.ds.unit_registry) else: ax_name = self.ds.coordinates.axis_name[self.axis] - path_element_name = ("index", "path_element_%s" % (ax_name)) + path_element_name = ("index", f"path_element_{ax_name}") path_length_unit = self.ds.field_info[path_element_name].units path_length_unit = Unit( path_length_unit, registry=self.ds.unit_registry @@ -552,7 +552,7 @@ def _handle_chunk(self, chunk, fields, tree): dl = self.ds.quan(1.0, "") else: ax_name = self.ds.coordinates.axis_name[self.axis] - dl = chunk["index", "path_element_%s" % (ax_name)] + dl = chunk["index", f"path_element_{ax_name}"] # This is done for cases where our path element does not have a CGS # equivalent. Once "preferred units" have been implemented, this # will not be necessary at all, as the final conversion will occur @@ -876,12 +876,12 @@ def _fill_sph_particles(self, fields): fi = self.ds._get_field_info(field) ptype = fi.name[0] if ptype not in self.ds._sph_ptypes: - raise KeyError("%s is not a SPH particle type!" 
% ptype) + raise KeyError(f"{ptype} is not a SPH particle type!") buff = np.zeros(size, dtype="float64") if normalize: buff_den = np.zeros(size, dtype="float64") - pbar = tqdm(desc="Interpolating SPH field {}".format(field)) + pbar = tqdm(desc=f"Interpolating SPH field {field}") for chunk in self._data_source.chunks([field], "io"): px = chunk[(ptype, "particle_position_x")].in_base("code").d py = chunk[(ptype, "particle_position_y")].in_base("code").d @@ -991,11 +991,11 @@ def _fill_fields(self, fields): def _generate_container_field(self, field): rv = self.ds.arr(np.ones(self.ActiveDimensions, dtype="float64"), "") axis_name = self.ds.coordinates.axis_name - if field == ("index", "d%s" % axis_name[0]): + if field == ("index", f"d{axis_name[0]}"): np.multiply(rv, self.dds[0], rv) - elif field == ("index", "d%s" % axis_name[1]): + elif field == ("index", f"d{axis_name[1]}"): np.multiply(rv, self.dds[1], rv) - elif field == ("index", "d%s" % axis_name[2]): + elif field == ("index", f"d{axis_name[2]}"): np.multiply(rv, self.dds[2], rv) elif field == ("index", axis_name[0]): x = np.mgrid[ @@ -1031,7 +1031,7 @@ def RightEdge(self): return self.right_edge def deposit(self, positions, fields=None, method=None, kernel_name="cubic"): - cls = getattr(particle_deposit, "deposit_%s" % method, None) + cls = getattr(particle_deposit, f"deposit_{method}", None) if cls is None: raise YTParticleDepositionNotImplemented(method) # We allocate number of zones, not number of octs. Everything @@ -2035,22 +2035,20 @@ def _export_obj( fmtl.write( "newmtl " + omname + "\n" ) # the specific material (color) for this face + fmtl.write(f"Ka {0.0:.6f} {0.0:.6f} {0.0:.6f}\n") # ambient color, keep off fmtl.write( - "Ka %.6f %.6f %.6f\n" % (0.0, 0.0, 0.0) - ) # ambient color, keep off - fmtl.write( - "Kd %.6f %.6f %.6f\n" % (lut[0][i], lut[1][i], lut[2][i]) + f"Kd {lut[0][i]:.6f} {lut[1][i]:.6f} {lut[2][i]:.6f}\n" ) # color of face fmtl.write( - "Ks %.6f %.6f %.6f\n" % (0.0, 0.0, 0.0) + f"Ks {0.0:.6f} {0.0:.6f} {0.0:.6f}\n" ) # specular color, keep off - fmtl.write("d %.6f\n" % (transparency)) # transparency - fmtl.write("em %.6f\n" % (emiss[i])) # emissivity per color + fmtl.write(f"d {transparency:.6f}\n") # transparency + fmtl.write(f"em {emiss[i]:.6f}\n") # emissivity per color fmtl.write("illum 2\n") # not relevant, 2 means highlights on? fmtl.write("Ns %.6f\n\n" % (0.0)) # keep off, some other specular thing # (2) write vertices for i in range(0, self.vertices.shape[1]): - fobj.write("v %.6f %.6f %.6f\n" % (v["x"][i], v["y"][i], v["z"][i])) + fobj.write(f"v {v['x'][i]:.6f} {v['y'][i]:.6f} {v['z'][i]:.6f}\n") fobj.write("#done defining vertices\n\n") # (3) define faces and materials for each face for i in range(0, self.triangles.shape[0]): @@ -2538,8 +2536,8 @@ def _upload_to_sketchfab(self, data, files): import requests SKETCHFAB_DOMAIN = "sketchfab.com" - SKETCHFAB_API_URL = "https://api.{}/v2/models".format(SKETCHFAB_DOMAIN) - SKETCHFAB_MODEL_URL = "https://{}/models/".format(SKETCHFAB_DOMAIN) + SKETCHFAB_API_URL = f"https://api.{SKETCHFAB_DOMAIN}/v2/models" + SKETCHFAB_MODEL_URL = f"https://{SKETCHFAB_DOMAIN}/models/" try: r = requests.post(SKETCHFAB_API_URL, data=data, files=files, verify=False) @@ -2722,7 +2720,7 @@ def _sanitize_ptypes(self, ptypes): self.ds.index for ptype in ptypes: if ptype not in self.ds.particle_types: - mess = "{} not found. Particle type must ".format(ptype) + mess = f"{ptype} not found. Particle type must " mess += "be in the dataset!" 
raise TypeError(mess) @@ -2814,7 +2812,7 @@ def scatter_smooth(self, fields, units, normalize): buff_den = np.empty(0) ptype = fields[0] - pbar = tqdm(desc="Interpolating (scatter) SPH field {}".format(fields[0])) + pbar = tqdm(desc=f"Interpolating (scatter) SPH field {fields[0]}") for chunk in self._data_source.chunks([fields], "io"): px = chunk[(ptype, "particle_position_x")].in_base("code").d py = chunk[(ptype, "particle_position_y")].in_base("code").d diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 91c6dab0b30..627cf71f697 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -393,7 +393,7 @@ def _generate_particle_field(self, field): ind = 0 for _io_chunk in self.chunks([], "io", cache=False): for _chunk in self.chunks(field, "spatial"): - x, y, z = (self[ftype, "particle_position_%s" % ax] for ax in "xyz") + x, y, z = (self[ftype, f"particle_position_{ax}"] for ax in "xyz") if x.size == 0: continue mask = self._current_chunk.objs[0].select_particles( @@ -417,7 +417,7 @@ def _count_particles(self, ftype): size = 0 for _io_chunk in self.chunks([], "io", cache=False): for _chunk in self.chunks([], "spatial"): - x, y, z = (self[ftype, "particle_position_%s" % ax] for ax in "xyz") + x, y, z = (self[ftype, f"particle_position_{ax}"] for ax in "xyz") if x.size == 0: continue size += self._current_chunk.objs[0].count_particles( @@ -626,7 +626,7 @@ def save_as_dataset(self, filename=None, fields=None): """ - keyword = "%s_%s" % (str(self.ds), self._type_name) + keyword = f"{str(self.ds)}_{self._type_name}" filename = get_output_filename(filename, keyword, ".h5") data = {} @@ -664,7 +664,7 @@ def save_as_dataset(self, filename=None, fields=None): if need_particle_positions: for ax in self.ds.coordinates.axis_order: for ptype in ptypes: - p_field = (ptype, "particle_position_%s" % ax) + p_field = (ptype, f"particle_position_{ax}") if p_field in self.ds.field_info and p_field not in data: data_fields.append(field) ftypes[p_field] = p_field[0] @@ -881,7 +881,7 @@ def create_firefly_object( ## the UI name if log_flag: units = units[len("log(") : -1] - field = "log{}".format(field) + field = f"log{field}" ## perform the unit conversion and take the log if ## necessary. @@ -1052,7 +1052,7 @@ def max(self, field, axis=None): r = self.ds.proj(field, axis, data_source=self, method="mip") return r else: - raise NotImplementedError("Unknown axis %s" % axis) + raise NotImplementedError(f"Unknown axis {axis}") def min(self, field, axis=None): r"""Compute the minimum of a field. @@ -1092,7 +1092,7 @@ def min(self, field, axis=None): "Minimum intensity projection not" " implemented." ) else: - raise NotImplementedError("Unknown axis %s" % axis) + raise NotImplementedError(f"Unknown axis {axis}") def std(self, field, weight=None): """Compute the variance of a field. 
@@ -1264,7 +1264,7 @@ def mean(self, field, axis=None, weight=None): elif axis is None: r = self.quantities.weighted_average_quantity(field, weight_field) else: - raise NotImplementedError("Unknown axis %s" % axis) + raise NotImplementedError(f"Unknown axis {axis}") return r def sum(self, field, axis=None): @@ -1301,7 +1301,7 @@ def sum(self, field, axis=None): elif axis is None: r = self.quantities.total_quantity(field) else: - raise NotImplementedError("Unknown axis %s" % axis) + raise NotImplementedError(f"Unknown axis {axis}") return r def integrate(self, field, weight=None, axis=None): @@ -1334,12 +1334,12 @@ def integrate(self, field, weight=None, axis=None): if axis in self.ds.coordinates.axis_name: r = self.ds.proj(field, axis, data_source=self, weight_field=weight_field) else: - raise NotImplementedError("Unknown axis %s" % axis) + raise NotImplementedError(f"Unknown axis {axis}") return r @property def _hash(self): - s = "%s" % self + s = f"{self}" try: import hashlib @@ -1386,7 +1386,7 @@ def clone(self): def __repr__(self): # We'll do this the slow way to be clear what's going on - s = "%s (%s): " % (self.__class__.__name__, self.ds) + s = f"{self.__class__.__name__} ({self.ds}): " for i in self._con_args: try: s += ", %s=%s" % ( @@ -1394,7 +1394,7 @@ def __repr__(self): getattr(self, i).in_base(unit_system=self.ds.unit_system), ) except AttributeError: - s += ", %s=%s" % (i, getattr(self, i)) + s += f", {i}={getattr(self, i)}" return s @contextmanager @@ -1572,7 +1572,7 @@ def selector(self): if self._selector is not None: return self._selector s_module = getattr(self, "_selector_module", yt.geometry.selection_routines) - sclass = getattr(s_module, "%s_selector" % self._type_name, None) + sclass = getattr(s_module, f"{self._type_name}_selector", None) if sclass is None: raise YTDataSelectorNotImplemented(self._type_name) @@ -2720,7 +2720,7 @@ def extract_isocontours( for v1 in verts: f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2])) for i in range(len(verts) // 3): - f.write("f %s %s %s\n" % (i * 3 + 1, i * 3 + 2, i * 3 + 3)) + f.write(f"f {i * 3 + 1} {i * 3 + 2} {i * 3 + 3}\n") if not hasattr(filename, "write"): f.close() if sample_values is not None: @@ -2880,8 +2880,8 @@ def extract_connected_sets( if cid == -1: continue contours[level][cid] = base_object.cut_region( - ["obj['contours_%s'] == %s" % (contour_key, cid)], - {"contour_slices_%s" % contour_key: cids}, + [f"obj['contours_{contour_key}'] == {cid}"], + {f"contour_slices_{contour_key}": cids}, ) return cons, contours @@ -2957,7 +2957,7 @@ def __init__( self.op = op.upper() self.dobj1 = dobj1 self.dobj2 = dobj2 - name = "Boolean%sSelector" % (self.op,) + name = f"Boolean{self.op}Selector" sel_cls = getattr(yt.geometry.selection_routines, name) self._selector = sel_cls(self) diff --git a/yt/data_objects/derived_quantities.py b/yt/data_objects/derived_quantities.py index cfaeb9a0683..62b42b01007 100644 --- a/yt/data_objects/derived_quantities.py +++ b/yt/data_objects/derived_quantities.py @@ -22,7 +22,7 @@ def get_position_fields(field, data): ftype = finfo.alias_name[0] else: ftype = finfo.name[0] - position_fields = [(ftype, "particle_position_%s" % d) for d in axis_names] + position_fields = [(ftype, f"particle_position_{d}") for d in axis_names] else: position_fields = axis_names @@ -267,7 +267,7 @@ def process_chunk( if self.use_particles: vals += [ ( - data[particle_type, "particle_position_%s" % ax] + data[particle_type, f"particle_position_{ax}"] * data[particle_type, "particle_mass"] 
).sum(dtype=np.float64) for ax in "xyz" @@ -339,7 +339,7 @@ def process_chunk( vals = [] if use_gas: vals += [ - (data["gas", "velocity_%s" % ax] * data["gas", "mass"]).sum( + (data["gas", f"velocity_{ax}"] * data["gas", "mass"]).sum( dtype=np.float64 ) for ax in "xyz" @@ -348,7 +348,7 @@ def process_chunk( if use_particles and "nbody" in data.ds.particle_types: vals += [ ( - data[particle_type, "particle_velocity_%s" % ax] + data[particle_type, f"particle_velocity_{ax}"] * data[particle_type, "particle_mass"] ).sum(dtype=np.float64) for ax in "xyz" @@ -520,7 +520,7 @@ def process_chunk( rvals.extend( [ ( - data["gas", "specific_angular_momentum_%s" % axis] + data["gas", f"specific_angular_momentum_{axis}"] * data["gas", "mass"] ).sum(dtype=np.float64) for axis in "xyz" @@ -533,7 +533,7 @@ def process_chunk( ( data[ self.particle_type, - "particle_specific_angular_momentum_%s" % axis, + f"particle_specific_angular_momentum_{axis}", ] * data[self.particle_type, "particle_mass"] ).sum(dtype=np.float64) diff --git a/yt/data_objects/grid_patch.py b/yt/data_objects/grid_patch.py index a80d461f71d..3722c065326 100644 --- a/yt/data_objects/grid_patch.py +++ b/yt/data_objects/grid_patch.py @@ -363,7 +363,7 @@ def particle_operation(self, *args, **kwargs): def deposit(self, positions, fields=None, method=None, kernel_name="cubic"): # Here we perform our particle deposition. - cls = getattr(particle_deposit, "deposit_%s" % method, None) + cls = getattr(particle_deposit, f"deposit_{method}", None) if cls is None: raise YTParticleDepositionNotImplemented(method) # We allocate number of zones, not number of octs. Everything diff --git a/yt/data_objects/level_sets/clump_handling.py b/yt/data_objects/level_sets/clump_handling.py index b5f5f927258..71dfd10d0a4 100644 --- a/yt/data_objects/level_sets/clump_handling.py +++ b/yt/data_objects/level_sets/clump_handling.py @@ -14,7 +14,7 @@ def add_contour_field(ds, contour_key): def _contours(field, data): - fd = data.get_field_parameter("contour_slices_%s" % contour_key) + fd = data.get_field_parameter(f"contour_slices_{contour_key}") vals = data["index", "ones"] * -1 if fd is None or fd == 0.0: return vals @@ -23,7 +23,7 @@ def _contours(field, data): return vals ds.add_field( - ("index", "contours_%s" % contour_key), + ("index", f"contours_{contour_key}"), function=_contours, validators=[ValidateSpatial(0)], take_log=False, @@ -164,8 +164,8 @@ def find_children(self, min_val, max_val=None): if cid == -1: continue new_clump = base_object.cut_region( - ["obj['contours_%s'] == %s" % (contour_key, cid)], - {("contour_slices_%s" % contour_key): cids}, + [f"obj['contours_{contour_key}'] == {cid}"], + {(f"contour_slices_{contour_key}"): cids}, ) if new_clump["ones"].size == 0: # This is to skip possibly duplicate clumps. 
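The clump-handling hunks above derive contour field names and cut_region filter strings from a contour key. A minimal standalone sketch, with an assumed contour key and contour id, showing that the new f-string spellings match the old %-formatted ones:

    contour_key = "0001"   # assumed placeholder key
    cid = 3                # assumed placeholder contour id

    field_name = ("index", f"contours_{contour_key}")
    filter_expr = f"obj['contours_{contour_key}'] == {cid}"
    param_name = f"contour_slices_{contour_key}"

    assert field_name == ("index", "contours_%s" % contour_key)
    assert filter_expr == "obj['contours_%s'] == %s" % (contour_key, cid)
    assert param_name == "contour_slices_%s" % contour_key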
@@ -295,7 +295,7 @@ def save_as_dataset(self, filename=None, fields=None): # collect data fields if fields is not None: contour_fields = [ - ("index", "contours_%s" % ckey) + ("index", f"contours_{ckey}") for ckey in np.unique(clump_info["contour_key"]) if str(ckey) != "-1" ] @@ -320,7 +320,7 @@ def save_as_dataset(self, filename=None, fields=None): if len(ptypes) > 0: for ax in "xyz": for ptype in ptypes: - p_field = (ptype, "particle_position_%s" % ax) + p_field = (ptype, f"particle_position_{ax}") if p_field in ds.field_info and p_field not in field_data: ftypes[p_field] = p_field[0] field_data[p_field] = self.base[p_field] @@ -329,7 +329,7 @@ def save_as_dataset(self, filename=None, fields=None): if clump.contour_key is None: continue for ptype in ptypes: - cfield = (ptype, "contours_%s" % clump.contour_key) + cfield = (ptype, f"contours_{clump.contour_key}") if cfield not in field_data: field_data[cfield] = clump.data._part_ind(ptype).astype( np.int64 @@ -357,7 +357,7 @@ def save_as_dataset(self, filename=None, fields=None): ftype = "index" else: ftype = field[0] - cfield = (ftype, "contours_%s" % self.contour_key) + cfield = (ftype, f"contours_{self.contour_key}") if cfield not in cfilters: cfilters[cfield] = field_data[cfield] == self.contour_id field_data[field] = field_data[field][cfilters[cfield]] diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index a72772753e9..78e1def558f 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -163,7 +163,7 @@ def deposit(self, positions, fields=None, method=None, kernel_name="cubic"): # Here we perform our particle deposition. if fields is None: fields = [] - cls = getattr(particle_deposit, "deposit_%s" % method, None) + cls = getattr(particle_deposit, f"deposit_{method}", None) if cls is None: raise YTParticleDepositionNotImplemented(method) nz = self.nz @@ -346,7 +346,7 @@ def smooth( fields = [] if index_fields is None: index_fields = [] - cls = getattr(particle_smooth, "%s_smooth" % method, None) + cls = getattr(particle_smooth, f"{method}_smooth", None) if cls is None: raise YTParticleDepositionNotImplemented(method) nz = self.nz @@ -447,7 +447,7 @@ def particle_operation( pdom_ind = particle_octree.domain_ind(self.selector) if fields is None: fields = [] - cls = getattr(particle_smooth, "%s_smooth" % method, None) + cls = getattr(particle_smooth, f"{method}_smooth", None) if cls is None: raise YTParticleDepositionNotImplemented(method) nz = self.nz @@ -586,7 +586,7 @@ def __init__(self, octree_subset): # Cache some attributes for attr in ["ires", "icoords", "fcoords", "fwidth"]: v = getattr(octree_subset, attr) - setattr(self, "_%s" % attr, octree_subset._reshape_vals(v)) + setattr(self, f"_{attr}", octree_subset._reshape_vals(v)) def __iter__(self): for i in range(self._ires.shape[-1]): diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 8e600f948ce..562772667df 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -99,7 +99,7 @@ def __init__( self.sorts.append(sort) pfields = {} - for field in ("particle_position_%s" % ax for ax in "xyz"): + for field in (f"particle_position_{ax}" for ax in "xyz"): pfields[field] = dd[fds[field]].ndarray_view()[mask][sort] sto.result_id = ds.parameter_filename @@ -117,7 +117,7 @@ def __init__( self.particle_fields = [] output_field = np.empty((self.num_indices, self.num_steps)) output_field.fill(np.nan) - for field in 
("particle_position_%s" % ax for ax in "xyz"): + for field in (f"particle_position_{ax}" for ax in "xyz"): for i, (_fn, (_time, indices, pfields)) in enumerate(sorted_storage): try: # This will fail if particles ids are @@ -238,7 +238,7 @@ def _get_data(self, fields): ] step = int(0) pbar = get_pbar( - "Generating [%s] fields in trajectories" % ", ".join(missing_fields), + f"Generating [{', '.join(missing_fields)}] fields in trajectories", self.num_steps, ) my_storage = {} @@ -391,4 +391,4 @@ def write_out_h5(self, filename): self.times.write_hdf5(filename, dataset_name="particle_times") fields = [field for field in sorted(self.field_data.keys())] for field in fields: - self[field].write_hdf5(filename, dataset_name="%s" % field) + self[field].write_hdf5(filename, dataset_name=f"{field}") diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index e15497b73f8..1364e81fbbf 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -137,7 +137,7 @@ def set_field_unit(self, field, new_unit): if fd in self.field_units: self.field_units[fd] = Unit(new_unit, registry=self.ds.unit_registry) else: - raise KeyError("%s not in profile!" % (field)) + raise KeyError(f"{field} not in profile!") def _finalize_storage(self, fields, temp_storage): # We use our main comm here @@ -363,7 +363,7 @@ def save_as_dataset(self, filename=None): """ - keyword = "%s_%s" % (str(self.ds), self.__class__.__name__) + keyword = f"{str(self.ds)}_{self.__class__.__name__}" filename = get_output_filename(filename, keyword, ".h5") args = ("field", "log") @@ -392,18 +392,18 @@ def save_as_dataset(self, filename=None): dimensionality += 1 data[ax] = getattr(self, ax) bin_data.append(data[ax]) - bin_field_name = "%s_bins" % ax + bin_field_name = f"{ax}_bins" data[bin_field_name] = getattr(self, bin_field_name) - extra_attrs["%s_range" % ax] = self.ds.arr( + extra_attrs[f"{ax}_range"] = self.ds.arr( [data[bin_field_name][0], data[bin_field_name][-1]] ) for arg in args: - key = "%s_%s" % (ax, arg) + key = f"{ax}_{arg}" extra_attrs[key] = getattr(self, key) bin_fields = np.meshgrid(*bin_data) for i, ax in enumerate("xyz"[:dimensionality]): - data[getattr(self, "%s_field" % ax)] = bin_fields[i] + data[getattr(self, f"{ax}_field")] = bin_fields[i] extra_attrs["dimensionality"] = dimensionality ftypes = dict([(field, "data") for field in data if field[0] != std]) @@ -428,9 +428,9 @@ def __init__(self, ds): exclude_fields = ["used", "weight"] for ax in "xyz"[: ds.dimensionality]: setattr(self, ax, ds.data[ax]) - ax_bins = "%s_bins" % ax - ax_field = "%s_field" % ax - ax_log = "%s_log" % ax + ax_bins = f"{ax}_bins" + ax_field = f"{ax}_field" + ax_log = f"{ax}_log" setattr(self, ax_bins, ds.data[ax_bins]) field_name = tuple(ds.parameters.get(ax_field, (None, None))) setattr(self, ax_field, field_name) @@ -1452,7 +1452,7 @@ def create_profile( kwargs["deposition"] = deposition if override_bins is not None: for o_bin, ax in zip(o_bins, ["x", "y", "z"]): - kwargs["override_bins_{0}".format(ax)] = o_bin + kwargs[f"override_bins_{ax}"] = o_bin obj = cls(*args, **kwargs) obj.accumulation = accumulation obj.fractional = fractional diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index 4c501862e5c..80f041db515 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -150,8 +150,8 @@ def __init__(self, axis, coords, ds=None, field_parameters=None, data_source=Non self.px_ax = xax self.py_ax = yax # 
Even though we may not be using x,y,z we use them here. - self.px_dx = "d%s" % ("xyz"[self.px_ax]) - self.py_dx = "d%s" % ("xyz"[self.py_ax]) + self.px_dx = f"d{'xyz'[self.px_ax]}" + self.py_dx = f"d{'xyz'[self.py_ax]}" # Convert coordinates to code length. if isinstance(coords[0], YTQuantity): self.px = self.ds.quan(coords[0]).to("code_length") @@ -884,7 +884,7 @@ def __init__(self, points, ds=None, field_parameters=None, data_source=None): points = fix_length(points, ds) if len(points) < 2: raise YTException( - "Not enough points. Expected at least 2, got %s" % len(points) + f"Not enough points. Expected at least 2, got {len(points)}" ) mylog.debug("Building minimal sphere around points.") mb = _miniball.Miniball(points) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 2c75c39061a..eb866c68ef4 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -309,7 +309,7 @@ def _set_derived_attrs(self): if not isinstance(self.current_time, YTQuantity): self.current_time = self.quan(self.current_time, "code_time") for attr in ("center", "width", "left_edge", "right_edge"): - n = "domain_%s" % attr + n = f"domain_{attr}" v = getattr(self, n) if not isinstance(v, YTArray) and v is not None: # Note that we don't add on _ipython_display_ here because @@ -325,7 +325,7 @@ def __repr__(self): return self.basename def _hash(self): - s = "%s;%s;%s" % (self.basename, self.current_time, self.unique_identifier) + s = f"{self.basename};{self.current_time};{self.unique_identifier}" try: import hashlib @@ -619,7 +619,7 @@ def set_field_label_format(self, format_property, value): available_formats = {"ionization_label": ("plus_minus", "roman_numeral")} if format_property in available_formats: if value in available_formats[format_property]: - setattr(self, "_%s_format" % format_property, value) + setattr(self, f"_{format_property}_format", value) else: raise ValueError( "{0} not an acceptable value for format_property " @@ -1123,7 +1123,7 @@ def set_units(self): self.unit_registry.modify("h", self.hubble_constant) # Comoving lengths for my_unit in ["m", "pc", "AU", "au"]: - new_unit = "%scm" % my_unit + new_unit = f"{my_unit}cm" my_u = Unit(my_unit, registry=self.unit_registry) self.unit_registry.add( new_unit, @@ -1223,14 +1223,14 @@ def _override_code_units(self): ("magnetic", "gauss"), ("temperature", "K"), ]: - val = self.units_override.get("%s_unit" % unit, None) + val = self.units_override.get(f"{unit}_unit", None) if val is not None: if isinstance(val, YTQuantity): val = (val.v, str(val.units)) elif not isinstance(val, tuple): val = (val, cgs) mylog.info("Overriding %s_unit: %g %s.", unit, val[0], val[1]) - setattr(self, "%s_unit" % unit, self.quan(val[0], val[1])) + setattr(self, f"{unit}_unit", self.quan(val[0], val[1])) _units = None _unit_system_id = None @@ -1490,7 +1490,7 @@ def add_deposited_particle_field( field_name = field_name % (ptype, deposit_field.replace("particle_", "")) if method == "count": - field_name = "%s_count" % ptype + field_name = f"{ptype}_count" if ("deposit", field_name) in self.field_info: mylog.warning("The deposited field %s already exists", field_name) return ("deposit", field_name) @@ -1598,7 +1598,7 @@ def add_gradient_fields(self, input_field): # Now we make a list of the fields that were just made, to check them # and to return them grad_fields = [ - (ftype, input_field + "_gradient_%s" % suffix) + (ftype, input_field + f"_gradient_{suffix}") for suffix in self.coordinates.axis_order ] 
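The add_gradient_fields hunk above assembles the returned field names with f-strings. A standalone sketch of that name construction, assuming a "gas" density field and a Cartesian axis order:

    ftype, input_field = "gas", "density"   # assumed example field
    axis_order = ("x", "y", "z")

    grad_fields = [
        (ftype, input_field + f"_gradient_{suffix}") for suffix in axis_order
    ]
    grad_fields.append((ftype, input_field + "_gradient_magnitude"))

    assert grad_fields[0] == ("gas", "density_gradient_x")
    assert grad_fields[-1] == ("gas", "density_gradient_magnitude")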
grad_fields.append((ftype, input_field + "_gradient_magnitude")) diff --git a/yt/data_objects/tests/test_covering_grid.py b/yt/data_objects/tests/test_covering_grid.py index b233d03c1c8..24bee6adf40 100644 --- a/yt/data_objects/tests/test_covering_grid.py +++ b/yt/data_objects/tests/test_covering_grid.py @@ -36,10 +36,10 @@ def test_covering_grid(): dn = ds.refine_by ** level cg = ds.covering_grid(level, [0.0, 0.0, 0.0], dn * ds.domain_dimensions) # Test coordinate generation - assert_equal(np.unique(cg["d%s" % axis_name[0]]).size, 1) + assert_equal(np.unique(cg[f"d{axis_name[0]}"]).size, 1) xmi = cg[axis_name[0]].min() xma = cg[axis_name[0]].max() - dx = cg["d%s" % axis_name[0]].flat[0:1] + dx = cg[f"d{axis_name[0]}"].flat[0:1] edges = ds.arr([[0, 1], [0, 1], [0, 1]], "code_length") assert_equal(xmi, edges[0, 0] + dx / 2.0) assert_equal(xmi, cg[axis_name[0]][0, 0, 0]) @@ -47,20 +47,20 @@ def test_covering_grid(): assert_equal(xma, edges[0, 1] - dx / 2.0) assert_equal(xma, cg[axis_name[0]][-1, 0, 0]) assert_equal(xma, cg[axis_name[0]][-1, 1, 1]) - assert_equal(np.unique(cg["d%s" % axis_name[1]]).size, 1) + assert_equal(np.unique(cg[f"d{axis_name[1]}"]).size, 1) ymi = cg[axis_name[1]].min() yma = cg[axis_name[1]].max() - dy = cg["d%s" % axis_name[1]][0] + dy = cg[f"d{axis_name[1]}"][0] assert_equal(ymi, edges[1, 0] + dy / 2.0) assert_equal(ymi, cg[axis_name[1]][0, 0, 0]) assert_equal(ymi, cg[axis_name[1]][1, 0, 1]) assert_equal(yma, edges[1, 1] - dy / 2.0) assert_equal(yma, cg[axis_name[1]][0, -1, 0]) assert_equal(yma, cg[axis_name[1]][1, -1, 1]) - assert_equal(np.unique(cg["d%s" % axis_name[2]]).size, 1) + assert_equal(np.unique(cg[f"d{axis_name[2]}"]).size, 1) zmi = cg[axis_name[2]].min() zma = cg[axis_name[2]].max() - dz = cg["d%s" % axis_name[2]][0] + dz = cg[f"d{axis_name[2]}"][0] assert_equal(zmi, edges[2, 0] + dz / 2.0) assert_equal(zmi, cg[axis_name[2]][0, 0, 0]) assert_equal(zmi, cg[axis_name[2]][1, 1, 0]) diff --git a/yt/data_objects/tests/test_fluxes.py b/yt/data_objects/tests/test_fluxes.py index 66b6982ad30..d6c33c385fa 100644 --- a/yt/data_objects/tests/test_fluxes.py +++ b/yt/data_objects/tests/test_fluxes.py @@ -92,8 +92,8 @@ def test_export_obj(self): color_field_min=mi, ) - assert os.path.exists("%s.obj" % basename) - assert os.path.exists("%s.mtl" % basename) + assert os.path.exists(f"{basename}.obj") + assert os.path.exists(f"{basename}.mtl") def _Emissivity(field, data): return data["density"] * data["density"] * np.sqrt(data["temperature"]) @@ -117,8 +117,8 @@ def _Emissivity(field, data): ) basename = "my_galaxy_emis" - assert os.path.exists("%s.obj" % basename) - assert os.path.exists("%s.mtl" % basename) + assert os.path.exists(f"{basename}.obj") + assert os.path.exists(f"{basename}.mtl") def test_correct_output_unit_fake_ds(): diff --git a/yt/data_objects/tests/test_pickle.py b/yt/data_objects/tests/test_pickle.py index 16afb91839f..238ad713371 100644 --- a/yt/data_objects/tests/test_pickle.py +++ b/yt/data_objects/tests/test_pickle.py @@ -38,10 +38,10 @@ def test_save_load_pickle(): # load object test_load = pickle.load(open(cpklfile.name, "rb")) - assert_equal.description = "%s: File was pickle-loaded successfully" % __name__ + assert_equal.description = f"{__name__}: File was pickle-loaded successfully" assert_equal(test_load is not None, True) assert_equal.description = ( - "%s: Length of pickle-loaded connected set object" % __name__ + f"{__name__}: Length of pickle-loaded connected set object" ) assert_equal(len(contours[1][0]), len(test_load)) diff --git 
a/yt/data_objects/tests/test_profiles.py b/yt/data_objects/tests/test_profiles.py index 835668876ca..b1d940da770 100644 --- a/yt/data_objects/tests/test_profiles.py +++ b/yt/data_objects/tests/test_profiles.py @@ -33,7 +33,7 @@ def test_profiles(): for nb in [8, 16, 32, 64]: for input_units in ["mks", "cgs"]: (rmi, rma), (tmi, tma), (dmi, dma) = [ - getattr(ex, "in_%s" % input_units)() + getattr(ex, f"in_{input_units}")() for ex in dd.quantities["Extrema"]( ["density", "temperature", "dinosaurs"] ) diff --git a/yt/data_objects/tests/test_projection.py b/yt/data_objects/tests/test_projection.py index e11123a417c..a6875aad3e6 100644 --- a/yt/data_objects/tests/test_projection.py +++ b/yt/data_objects/tests/test_projection.py @@ -94,7 +94,7 @@ def test_projection(pf): else: proj_unit = "cm" if field_unit != "" and field_unit != Unit(): - proj_unit = "({0}) * {1}".format(field_unit, proj_unit) + proj_unit = f"({field_unit}) * {proj_unit}" assert_equal( frb[proj_field].units, Unit(proj_unit, registry=ds.unit_registry), @@ -112,7 +112,7 @@ def test_projection(pf): # wf == None assert_equal(wf, None) v1 = proj["density"].sum() - v2 = (dd["density"] * dd["d%s" % an]).sum() + v2 = (dd["density"] * dd[f"d{an}"]).sum() assert_rel_equal(v1, v2.in_units(v1.units), 10) teardown_func(fns) diff --git a/yt/data_objects/tests/test_sph_data_objects.py b/yt/data_objects/tests/test_sph_data_objects.py index f8c191809f7..4c228f4b5b5 100644 --- a/yt/data_objects/tests/test_sph_data_objects.py +++ b/yt/data_objects/tests/test_sph_data_objects.py @@ -7,7 +7,7 @@ def test_point(): ds = fake_sph_orientation_ds() field_data = ds.stream_handler.fields["stream_file"] - ppos = [field_data["io", "particle_position_%s" % d] for d in "xyz"] + ppos = [field_data["io", f"particle_position_{d}"] for d in "xyz"] ppos = np.array(ppos).T for pos in ppos: for i in range(-1, 2): diff --git a/yt/data_objects/tests/test_time_series.py b/yt/data_objects/tests/test_time_series.py index 2ec603a6f9e..e5dabff57bf 100644 --- a/yt/data_objects/tests/test_time_series.py +++ b/yt/data_objects/tests/test_time_series.py @@ -7,7 +7,7 @@ def test_pattern_expansion(): - file_list = ["fake_data_file_{}".format(str(i).zfill(4)) for i in range(10)] + file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)] with tempfile.TemporaryDirectory() as tmpdir: for file in file_list: diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index c586e46678d..20e4f669732 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -69,7 +69,7 @@ def get_filenames_from_glob_pattern(outputs): # we try to match the pattern from the test data dir file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern)) if not file_list: - raise OSError("No match found for pattern : {}".format(pattern)) + raise OSError(f"No match found for pattern : {pattern}") return sorted(file_list) diff --git a/yt/data_objects/unstructured_mesh.py b/yt/data_objects/unstructured_mesh.py index 54212d7cb8a..d3a04a01d36 100644 --- a/yt/data_objects/unstructured_mesh.py +++ b/yt/data_objects/unstructured_mesh.py @@ -98,7 +98,7 @@ def select_tcoords(self, dobj): def deposit(self, positions, fields=None, method=None, kernel_name="cubic"): raise NotImplementedError # Here we perform our particle deposition. 
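The test_pattern_expansion hunk above exercises zero-padded file naming when expanding glob patterns. A small standalone check, reusing the same placeholder stem as the test, that the f-string form matches the old str.format form:

    file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)]

    assert file_list[0] == "fake_data_file_0000"
    assert file_list[-1] == "fake_data_file_0009"
    assert file_list == ["fake_data_file_{}".format(str(i).zfill(4)) for i in range(10)]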
- cls = getattr(particle_deposit, "deposit_%s" % method, None) + cls = getattr(particle_deposit, f"deposit_{method}", None) if cls is None: raise YTParticleDepositionNotImplemented(method) # We allocate number of zones, not number of octs diff --git a/yt/exthook.py b/yt/exthook.py index be4391c8b6b..98906b796c8 100644 --- a/yt/exthook.py +++ b/yt/exthook.py @@ -86,7 +86,7 @@ def load_module(self, fullname): if "." not in modname: setattr(sys.modules[self.wrapper_module], modname, module) return module - raise ImportError("No module named %s" % fullname) + raise ImportError(f"No module named {fullname}") def is_important_traceback(self, important_module, tb): """Walks a traceback's frames and checks if any of the frames diff --git a/yt/fields/derived_field.py b/yt/fields/derived_field.py index 840cf6749e8..89959126994 100644 --- a/yt/fields/derived_field.py +++ b/yt/fields/derived_field.py @@ -282,7 +282,7 @@ def __call__(self, data): if self._function is NullFunc: raise RuntimeError( "Something has gone terribly wrong, _function is NullFunc " - + "for %s" % (self.name,) + + f"for {self.name}" ) with self.unit_registry(data): dd = self._function(self, data) @@ -341,16 +341,16 @@ def __repr__(self): if self._function == NullFunc: s = "On-Disk Field " elif func_name == "_TranslationFunc": - s = 'Alias Field for "%s" ' % (self.alias_name,) + s = f'Alias Field for "{self.alias_name}" ' else: s = "Derived Field " if isinstance(self.name, tuple): s += "(%s, %s): " % self.name else: - s += "%s: " % (self.name) - s += "(units: %s" % self.units + s += f"{self.name}: " + s += f"(units: {self.units}" if self.display_name is not None: - s += ", display_name: '%s'" % (self.display_name) + s += f", display_name: '{self.display_name}'" if self.sampling_type == "particle": s += ", particle field" s += ")" diff --git a/yt/fields/field_exceptions.py b/yt/fields/field_exceptions.py index 8fecc6156f9..d37e8653284 100644 --- a/yt/fields/field_exceptions.py +++ b/yt/fields/field_exceptions.py @@ -8,7 +8,7 @@ def __init__(self, ghost_zones=0, fields=None): self.fields = fields def __str__(self): - return "(%s, %s)" % (self.ghost_zones, self.fields) + return f"({self.ghost_zones}, {self.fields})" class NeedsOriginalGrid(NeedsGridType): @@ -21,7 +21,7 @@ def __init__(self, missing_fields): self.missing_fields = missing_fields def __str__(self): - return "(%s)" % (self.missing_fields) + return f"({self.missing_fields})" class NeedsProperty(ValidationException): @@ -29,7 +29,7 @@ def __init__(self, missing_properties): self.missing_properties = missing_properties def __str__(self): - return "(%s)" % (self.missing_properties) + return f"({self.missing_properties})" class NeedsParameter(ValidationException): @@ -37,7 +37,7 @@ def __init__(self, missing_parameters): self.missing_parameters = missing_parameters def __str__(self): - return "(%s)" % (self.missing_parameters) + return f"({self.missing_parameters})" class NeedsConfiguration(ValidationException): @@ -46,7 +46,7 @@ def __init__(self, parameter, value): self.value = value def __str__(self): - return "(Needs %s = %s)" % (self.parameter, self.value) + return f"(Needs {self.parameter} = {self.value})" class FieldUnitsError(Exception): diff --git a/yt/fields/field_functions.py b/yt/fields/field_functions.py index ddec1aa8192..19b4ee9e530 100644 --- a/yt/fields/field_functions.py +++ b/yt/fields/field_functions.py @@ -20,9 +20,7 @@ def get_radius(data, field_prefix, ftype): # This will coerce the units, so we don't need to worry that we copied # it from a cm**2 
array. np.subtract( - data[ftype, "%s%s" % (field_prefix, ax)].in_base(unit_system.name), - center[i], - r, + data[ftype, f"{field_prefix}{ax}"].in_base(unit_system.name), center[i], r, ) if data.ds.periodicity[i]: np.abs(r, r) diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index 6597103012a..d7e0a25b5f9 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -120,8 +120,8 @@ def setup_particle_fields(self, ptype, ftype="gas", num_neighbors=64): self.pop((ptype, "particle_position")) particle_vector_functions( ptype, - ["particle_position_%s" % ax for ax in "xyz"], - ["particle_velocity_%s" % ax for ax in "xyz"], + [f"particle_position_{ax}" for ax in "xyz"], + [f"particle_velocity_{ax}" for ax in "xyz"], self, ) particle_deposition_functions(ptype, "particle_position", "particle_mass", self) @@ -206,7 +206,7 @@ def setup_fluid_aliases(self, ftype="gas"): units = self.ds.field_units.get(field[1], units) units = self.ds.field_units.get(field, units) if not isinstance(units, str) and args[0] != "": - units = "((%s)*%s)" % (args[0], units) + units = f"(({args[0]})*{units})" if ( isinstance(units, (numeric_type, np.number, np.ndarray)) and args[0] == "" @@ -233,17 +233,17 @@ def setup_fluid_aliases(self, ftype="gas"): to_convert = False else: for suffix in ["x", "y", "z"]: - if "%s_%s" % (alias[:-2], suffix) not in aliases_gallery: + if f"{alias[:-2]}_{suffix}" not in aliases_gallery: to_convert = False break to_convert = True if to_convert: if alias[-2:] == "_x": - alias = "%s_%s" % (alias[:-2], axis_names[0]) + alias = f"{alias[:-2]}_{axis_names[0]}" elif alias[-2:] == "_y": - alias = "%s_%s" % (alias[:-2], axis_names[1]) + alias = f"{alias[:-2]}_{axis_names[1]}" elif alias[-2:] == "_z": - alias = "%s_%s" % (alias[:-2], axis_names[2]) + alias = f"{alias[:-2]}_{axis_names[2]}" self.alias((ftype, alias), field) @staticmethod @@ -432,7 +432,7 @@ def has_key(self, key): def __missing__(self, key): if self.fallback is None: - raise KeyError("No field named %s" % (key,)) + raise KeyError(f"No field named {key}") return self.fallback[key] @classmethod diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index ef8588f706a..e71460d10c1 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -169,10 +169,10 @@ def _metal_mass(field, data): def _number_density(field, data): field_data = np.zeros_like( - data["gas", "%s_number_density" % data.ds.field_info.species_names[0]] + data["gas", f"{data.ds.field_info.species_names[0]}_number_density"] ) for species in data.ds.field_info.species_names: - field_data += data["gas", "%s_number_density" % species] + field_data += data["gas", f"{species}_number_density"] return field_data else: @@ -271,7 +271,7 @@ def func(field, data): for axi, ax in enumerate(registry.ds.coordinates.axis_order): f = grad_func(axi, ax) registry.add_field( - (ftype, "%s_gradient_%s" % (fname, ax)), + (ftype, f"{fname}_gradient_{ax}"), sampling_type="local", function=f, validators=[ValidateSpatial(1, [grad_field])], @@ -280,7 +280,7 @@ def func(field, data): create_magnitude_field( registry, - "%s_gradient" % fname, + f"{fname}_gradient", grad_units, ftype=ftype, validators=[ValidateSpatial(1, [grad_field])], diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index 69b4c7b542e..74b54c72284 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -52,11 +52,11 @@ def _baroclinic_vorticity_z(field, data): bv_validators = 
[ValidateSpatial(1, [(ftype, "density"), (ftype, "pressure")])] for ax in "xyz": - n = "baroclinic_vorticity_%s" % ax + n = f"baroclinic_vorticity_{ax}" registry.add_field( (ftype, n), sampling_type="cell", - function=eval("_%s" % n), + function=eval(f"_{n}"), validators=bv_validators, units=unit_system["frequency"] ** 2, ) @@ -110,16 +110,16 @@ def _vorticity_z(field, data): return new_field vort_validators = [ - ValidateSpatial(1, [(ftype, "velocity_%s" % d) for d in "xyz"]), + ValidateSpatial(1, [(ftype, f"velocity_{d}") for d in "xyz"]), ValidateParameter("bulk_velocity"), ] for ax in "xyz": - n = "vorticity_%s" % ax + n = f"vorticity_{ax}" registry.add_field( (ftype, n), sampling_type="cell", - function=eval("_%s" % n), + function=eval(f"_{n}"), units=unit_system["frequency"], validators=vort_validators, ) @@ -151,11 +151,11 @@ def _vorticity_stretching_z(field, data): return data[ftype, "velocity_divergence"] * data[ftype, "vorticity_z"] for ax in "xyz": - n = "vorticity_stretching_%s" % ax + n = f"vorticity_stretching_{ax}" registry.add_field( (ftype, n), sampling_type="cell", - function=eval("_%s" % n), + function=eval(f"_{n}"), units=unit_system["frequency"] ** 2, validators=vort_validators, ) @@ -188,11 +188,11 @@ def _vorticity_growth_z(field, data): ) for ax in "xyz": - n = "vorticity_growth_%s" % ax + n = f"vorticity_growth_{ax}" registry.add_field( (ftype, n), sampling_type="cell", - function=eval("_%s" % n), + function=eval(f"_{n}"), units=unit_system["frequency"] ** 2, validators=vort_validators, ) @@ -206,8 +206,7 @@ def _vorticity_growth_magnitude(field, data): dot = data.ds.arr(np.zeros(result.shape), "") for ax in "xyz": dot += ( - data[ftype, "vorticity_%s" % ax] - * data[ftype, "vorticity_growth_%s" % ax] + data[ftype, f"vorticity_{ax}"] * data[ftype, f"vorticity_growth_{ax}"] ).to_ndarray() result = np.sign(dot) * result return result @@ -290,11 +289,11 @@ def _vorticity_radiation_pressure_z(field, data): ) ] for ax in "xyz": - n = "vorticity_radiation_pressure_%s" % ax + n = f"vorticity_radiation_pressure_{ax}" registry.add_field( (ftype, n), sampling_type="cell", - function=eval("_%s" % n), + function=eval(f"_{n}"), units=unit_system["frequency"] ** 2, validators=vrp_validators, ) @@ -330,11 +329,11 @@ def _vorticity_radiation_pressure_growth_z(field, data): ) for ax in "xyz": - n = "vorticity_radiation_pressure_growth_%s" % ax + n = f"vorticity_radiation_pressure_growth_{ax}" registry.add_field( (ftype, n), sampling_type="cell", - function=eval("_%s" % n), + function=eval(f"_{n}"), units=unit_system["frequency"] ** 2, validators=vrp_validators, ) @@ -348,8 +347,7 @@ def _vorticity_radiation_pressure_growth_magnitude(field, data): dot = data.ds.arr(np.zeros(result.shape), "") for ax in "xyz": dot += ( - data[ftype, "vorticity_%s" % ax] - * data[ftype, "vorticity_growth_%s" % ax] + data[ftype, f"vorticity_{ax}"] * data[ftype, f"vorticity_growth_{ax}"] ).to_ndarray() result = np.sign(dot) * result return result diff --git a/yt/fields/magnetic_field.py b/yt/fields/magnetic_field.py index cce3ce29fb1..82f6dc7ddf4 100644 --- a/yt/fields/magnetic_field.py +++ b/yt/fields/magnetic_field.py @@ -17,10 +17,10 @@ def setup_magnetic_field_fields(registry, ftype="gas", slice_info=None): axis_names = registry.ds.coordinates.axis_order - if (ftype, "magnetic_field_%s" % axis_names[0]) not in registry: + if (ftype, f"magnetic_field_{axis_names[0]}") not in registry: return - u = registry[ftype, "magnetic_field_%s" % axis_names[0]].units + u = registry[ftype, 
f"magnetic_field_{axis_names[0]}"].units def mag_factors(dims): if dims == dimensions.magnetic_field_cgs: @@ -29,9 +29,9 @@ def mag_factors(dims): return ds.units.physical_constants.mu_0 def _magnetic_field_strength(field, data): - xm = "relative_magnetic_field_%s" % axis_names[0] - ym = "relative_magnetic_field_%s" % axis_names[1] - zm = "relative_magnetic_field_%s" % axis_names[2] + xm = f"relative_magnetic_field_{axis_names[0]}" + ym = f"relative_magnetic_field_{axis_names[1]}" + zm = f"relative_magnetic_field_{axis_names[2]}" B2 = (data[ftype, xm]) ** 2 + (data[ftype, ym]) ** 2 + (data[ftype, zm]) ** 2 @@ -280,7 +280,7 @@ def _mag_field(field, data): for ax, fd in zip(registry.ds.coordinates.axis_order, ds_fields): registry.add_field( - (ftype, "magnetic_field_%s" % ax), + (ftype, f"magnetic_field_{ax}"), sampling_type=sampling_type, function=mag_field(fd), units=units, @@ -294,7 +294,7 @@ def _mag_field(field, data): return _mag_field for ax in registry.ds.coordinates.axis_order: - fname = "particle_magnetic_field_%s" % ax + fname = f"particle_magnetic_field_{ax}" registry.add_field( (ds_ftype, fname), sampling_type=sampling_type, @@ -303,4 +303,4 @@ def _mag_field(field, data): ) sph_ptypes = getattr(registry.ds, "_sph_ptypes", tuple()) if ds_ftype in sph_ptypes: - registry.alias((ftype, "magnetic_field_%s" % ax), (ds_ftype, fname)) + registry.alias((ftype, f"magnetic_field_{ax}"), (ds_ftype, fname)) diff --git a/yt/fields/particle_fields.py b/yt/fields/particle_fields.py index d0a515bdeb0..432e464a0c9 100644 --- a/yt/fields/particle_fields.py +++ b/yt/fields/particle_fields.py @@ -87,7 +87,7 @@ def particle_count(field, data): return data.apply_units(d, field.units) registry.add_field( - ("deposit", "%s_count" % ptype), + ("deposit", f"{ptype}_count"), sampling_type="cell", function=particle_count, validators=[ValidateSpatial()], @@ -103,7 +103,7 @@ def particle_mass(field, data): return data.apply_units(d, field.units) registry.add_field( - ("deposit", "%s_mass" % ptype), + ("deposit", f"{ptype}_mass"), sampling_type="cell", function=particle_mass, validators=[ValidateSpatial()], @@ -122,7 +122,7 @@ def particle_density(field, data): return d registry.add_field( - ("deposit", "%s_density" % ptype), + ("deposit", f"{ptype}_density"), sampling_type="cell", function=particle_density, validators=[ValidateSpatial()], @@ -138,7 +138,7 @@ def particle_cic(field, data): return d registry.add_field( - ("deposit", "%s_cic" % ptype), + ("deposit", f"{ptype}_cic"), sampling_type="cell", function=particle_cic, validators=[ValidateSpatial()], @@ -168,7 +168,7 @@ def _deposit_field(field, data): for ax in "xyz": for method, name in zip(("cic", "sum"), ("cic", "nn")): function = _get_density_weighted_deposit_field( - "particle_velocity_%s" % ax, "code_velocity", method + f"particle_velocity_{ax}", "code_velocity", method ) registry.add_field( ("deposit", ("%s_" + name + "_velocity_%s") % (ptype, ax)), @@ -244,13 +244,13 @@ def _particle_position(field, data): for axi, ax in enumerate("xyz"): v, p = _get_coord_funcs(axi, ptype) registry.add_field( - (ptype, "particle_velocity_%s" % ax), + (ptype, f"particle_velocity_{ax}"), sampling_type="particle", function=v, units="code_velocity", ) registry.add_field( - (ptype, "particle_position_%s" % ax), + (ptype, f"particle_position_{ax}"), sampling_type="particle", function=p, units="code_length", @@ -293,7 +293,7 @@ def get_angular_momentum_components(ptype, data, spos, svel): [0.0, 0.0, 1.0], "code_length" ) # default to simulation axis pos = 
data.ds.arr([data[ptype, spos % ax] for ax in "xyz"]).T - vel = data.ds.arr([data[ptype, "relative_%s" % (svel % ax)] for ax in "xyz"]).T + vel = data.ds.arr([data[ptype, f"relative_{svel % ax}"] for ax in "xyz"]).T return pos, vel, normal @@ -305,9 +305,9 @@ def standard_particle_fields( def _particle_velocity_magnitude(field, data): """ M{|v|} """ return np.sqrt( - data[ptype, "relative_%s" % (svel % "x")] ** 2 - + data[ptype, "relative_%s" % (svel % "y")] ** 2 - + data[ptype, "relative_%s" % (svel % "z")] ** 2 + data[ptype, f"relative_{svel % 'x'}"] ** 2 + + data[ptype, f"relative_{svel % 'y'}"] ** 2 + + data[ptype, f"relative_{svel % 'z'}"] ** 2 ) registry.add_field( @@ -346,7 +346,7 @@ def _particle_specific_angular_momentum_component(field, data): def _particle_angular_momentum_component(field, data): return ( data[_ptype, "particle_mass"] - * data[ptype, "particle_specific_angular_momentum_%s" % ax] + * data[ptype, f"particle_specific_angular_momentum_{ax}"] ) return ( @@ -357,14 +357,14 @@ def _particle_angular_momentum_component(field, data): for axi, ax in enumerate("xyz"): f, v = _get_spec_ang_mom_comp(axi, ax, ptype) registry.add_field( - (ptype, "particle_specific_angular_momentum_%s" % ax), + (ptype, f"particle_specific_angular_momentum_{ax}"), sampling_type="particle", function=f, units=unit_system["specific_angular_momentum"], validators=[ValidateParameter("center")], ) registry.add_field( - (ptype, "particle_angular_momentum_%s" % ax), + (ptype, f"particle_angular_momentum_{ax}"), sampling_type="particle", function=v, units=unit_system["angular_momentum"], @@ -417,7 +417,7 @@ def _relative_particle_position(field, data): Note that the orientation of the x and y axes are arbitrary. """ - field_names = [(ptype, "particle_position_%s" % ax) for ax in "xyz"] + field_names = [(ptype, f"particle_position_{ax}") for ax in "xyz"] return obtain_position_vector(data, field_names=field_names).T def _particle_position_relative(field, data): @@ -451,7 +451,7 @@ def _relative_particle_velocity(field, data): Note that the orientation of the x and y axes are arbitrary. 
""" - field_names = [(ptype, "particle_velocity_%s" % ax) for ax in "xyz"] + field_names = [(ptype, f"particle_velocity_{ax}") for ax in "xyz"] return obtain_relative_velocity_vector(data, field_names=field_names).T def _particle_velocity_relative(field, data): @@ -489,25 +489,25 @@ def _particle_vel_rel(field, data): for axi, ax in enumerate("xyz"): v, p = _get_coord_funcs_relative(axi, ptype) registry.add_field( - (ptype, "particle_velocity_relative_%s" % ax), + (ptype, f"particle_velocity_relative_{ax}"), sampling_type="particle", function=v, units="code_velocity", ) registry.add_field( - (ptype, "particle_position_relative_%s" % ax), + (ptype, f"particle_position_relative_{ax}"), sampling_type="particle", function=p, units="code_length", ) registry.add_field( - (ptype, "relative_particle_velocity_%s" % ax), + (ptype, f"relative_particle_velocity_{ax}"), sampling_type="particle", function=v, units="code_velocity", ) registry.add_field( - (ptype, "relative_particle_position_%s" % ax), + (ptype, f"relative_particle_position_{ax}"), sampling_type="particle", function=p, units="code_length", @@ -868,7 +868,7 @@ def _pfunc_avg(field, data): v[np.isnan(v)] = 0.0 return v - fn = ("deposit", "%s_avg_%s" % (ptype, field_name)) + fn = ("deposit", f"{ptype}_avg_{field_name}") registry.add_field( fn, sampling_type="cell", @@ -905,7 +905,7 @@ def add_volume_weighted_smoothed_field( def add_nearest_neighbor_field(ptype, coord_name, registry, nneighbors=64): - field_name = (ptype, "nearest_neighbor_distance_%s" % (nneighbors)) + field_name = (ptype, f"nearest_neighbor_distance_{nneighbors}") def _nth_neighbor(field, data): pos = data[ptype, coord_name] @@ -933,7 +933,7 @@ def add_nearest_neighbor_value_field(ptype, coord_name, sampled_field, registry) based on the nearest particle value found. This is useful, for instance, with voronoi-tesselations. 
""" - field_name = ("deposit", "%s_nearest_%s" % (ptype, sampled_field)) + field_name = ("deposit", f"{ptype}_nearest_{sampled_field}") field_units = registry[ptype, sampled_field].units unit_system = registry.ds.unit_system diff --git a/yt/fields/species_fields.py b/yt/fields/species_fields.py index fc0e5111dfd..4118e8088aa 100644 --- a/yt/fields/species_fields.py +++ b/yt/fields/species_fields.py @@ -22,21 +22,21 @@ def _create_fraction_func(ftype, species): def _frac(field, data): - return data[ftype, "%s_density" % species] / data[ftype, "density"] + return data[ftype, f"{species}_density"] / data[ftype, "density"] return _frac def _mass_from_cell_volume_and_density(ftype, species): def _mass(field, data): - return data[ftype, "%s_density" % species] * data["index", "cell_volume"] + return data[ftype, f"{species}_density"] * data["index", "cell_volume"] return _mass def _mass_from_particle_mass_and_fraction(ftype, species): def _mass(field, data): - return data[ftype, "%s_fraction" % species] * data[ftype, "particle_mass"] + return data[ftype, f"{species}_fraction"] * data[ftype, "particle_mass"] return _mass @@ -47,14 +47,14 @@ def _create_number_density_func(ftype, species): def _number_density(field, data): weight = formula.weight # This is in AMU weight *= data.ds.units.physical_constants.amu_cgs - return data[ftype, "%s_density" % species] / weight + return data[ftype, f"{species}_density"] / weight return _number_density def _create_density_func(ftype, species): def _density(field, data): - return data[ftype, "%s_fraction" % species] * data[ftype, "density"] + return data[ftype, f"{species}_fraction"] * data[ftype, "density"] return _density @@ -68,7 +68,7 @@ def add_species_field_by_density(registry, ftype, species): unit_system = registry.ds.unit_system registry.add_field( - (ftype, "%s_fraction" % species), + (ftype, f"{species}_fraction"), sampling_type="local", function=_create_fraction_func(ftype, species), units="", @@ -79,23 +79,23 @@ def add_species_field_by_density(registry, ftype, species): else: _create_mass_func = _mass_from_cell_volume_and_density registry.add_field( - (ftype, "%s_mass" % species), + (ftype, f"{species}_mass"), sampling_type="local", function=_create_mass_func(ftype, species), units=unit_system["mass"], ) registry.add_field( - (ftype, "%s_number_density" % species), + (ftype, f"{species}_number_density"), sampling_type="local", function=_create_number_density_func(ftype, species), units=unit_system["number_density"], ) return [ - (ftype, "%s_number_density" % species), - (ftype, "%s_density" % species), - (ftype, "%s_mass" % species), + (ftype, f"{species}_number_density"), + (ftype, f"{species}_density"), + (ftype, f"{species}_mass"), ] @@ -108,7 +108,7 @@ def add_species_field_by_fraction(registry, ftype, species): unit_system = registry.ds.unit_system registry.add_field( - (ftype, "%s_density" % species), + (ftype, f"{species}_density"), sampling_type="local", function=_create_density_func(ftype, species), units=unit_system["density"], @@ -119,23 +119,23 @@ def add_species_field_by_fraction(registry, ftype, species): else: _create_mass_func = _mass_from_cell_volume_and_density registry.add_field( - (ftype, "%s_mass" % species), + (ftype, f"{species}_mass"), sampling_type="local", function=_create_mass_func(ftype, species), units=unit_system["mass"], ) registry.add_field( - (ftype, "%s_number_density" % species), + (ftype, f"{species}_number_density"), sampling_type="local", function=_create_number_density_func(ftype, species), 
units=unit_system["number_density"], ) return [ - (ftype, "%s_number_density" % species), - (ftype, "%s_density" % species), - (ftype, "%s_mass" % species), + (ftype, f"{species}_number_density"), + (ftype, f"{species}_density"), + (ftype, f"{species}_mass"), ] @@ -148,17 +148,13 @@ def add_species_aliases(registry, ftype, alias_species, species): This function registers field aliases for the density, number_density, mass, and fraction fields between the two species given in the arguments. """ + registry.alias((ftype, f"{alias_species}_density"), (ftype, f"{species}_density")) + registry.alias((ftype, f"{alias_species}_fraction"), (ftype, f"{species}_fraction")) registry.alias( - (ftype, "%s_density" % alias_species), (ftype, "%s_density" % species) + (ftype, f"{alias_species}_number_density"), + (ftype, f"{species}_number_density"), ) - registry.alias( - (ftype, "%s_fraction" % alias_species), (ftype, "%s_fraction" % species) - ) - registry.alias( - (ftype, "%s_number_density" % alias_species), - (ftype, "%s_number_density" % species), - ) - registry.alias((ftype, "%s_mass" % alias_species), (ftype, "%s_mass" % species)) + registry.alias((ftype, f"{alias_species}_mass"), (ftype, f"{species}_mass")) def add_deprecated_species_aliases(registry, ftype, alias_species, species): @@ -187,10 +183,10 @@ def _dep_field(field, data): ('The "%s_%s" field is deprecated. ' + 'Please use "%s_%s" instead.') % (alias_species, suffix, species, suffix) ) - return data[ftype, "%s_%s" % (species, suffix)] + return data[ftype, f"{species}_{suffix}"] registry.add_field( - (ftype, "%s_%s" % (alias_species, suffix)), + (ftype, f"{alias_species}_{suffix}"), sampling_type="local", function=_dep_field, units=my_units, @@ -202,7 +198,7 @@ def add_nuclei_density_fields(registry, ftype): elements = _get_all_elements(registry.species_names) for element in elements: registry.add_field( - (ftype, "%s_nuclei_density" % element), + (ftype, f"{element}_nuclei_density"), sampling_type="local", function=_nuclei_density, units=unit_system["number_density"], @@ -214,7 +210,7 @@ def add_nuclei_density_fields(registry, ftype): if element in elements: continue registry.add_field( - (ftype, "%s_nuclei_density" % element), + (ftype, f"{element}_nuclei_density"), sampling_type="local", function=_default_nuclei_density, units=unit_system["number_density"], @@ -253,14 +249,14 @@ def _nuclei_density(field, data): ftype = field.name[0] element = field.name[1][: field.name[1].find("_")] - nuclei_mass_field = "%s_nuclei_mass_density" % element + nuclei_mass_field = f"{element}_nuclei_mass_density" if (ftype, nuclei_mass_field) in data.ds.field_info: return ( data[(ftype, nuclei_mass_field)] / ChemicalFormula(element).weight / data.ds.units.physical_constants.amu_cgs ) - metal_field = "%s_metallicity" % element + metal_field = f"{element}_metallicity" if (ftype, metal_field) in data.ds.field_info: return ( data[ftype, "density"] @@ -270,7 +266,7 @@ def _nuclei_density(field, data): ) field_data = np.zeros_like( - data[ftype, "%s_number_density" % data.ds.field_info.species_names[0]] + data[ftype, f"{data.ds.field_info.species_names[0]}_number_density"] ) for species in data.ds.field_info.species_names: nucleus = species @@ -282,7 +278,7 @@ def _nuclei_density(field, data): # we will encounter species that contribute nothing, so we skip them. 
if num == 0: continue - field_data += num * data[ftype, "%s_number_density" % species] + field_data += num * data[ftype, f"{species}_number_density"] return field_data @@ -310,9 +306,9 @@ def setup_species_fields(registry, ftype="gas", slice_info=None): for species in registry.species_names: # These are all the species we should be looking for fractions or # densities of. - if (ftype, "%s_density" % species) in registry: + if (ftype, f"{species}_density") in registry: func = add_species_field_by_density - elif (ftype, "%s_fraction" % species) in registry: + elif (ftype, f"{species}_fraction") in registry: func = add_species_field_by_fraction else: # Skip it @@ -323,7 +319,7 @@ def setup_species_fields(registry, ftype="gas", slice_info=None): # These are deprecated and will be removed soon. if ChemicalFormula(species).charge == 0: alias_species = species.split("_")[0] - if (ftype, "{}_density".format(alias_species)) in registry: + if (ftype, f"{alias_species}_density") in registry: continue add_deprecated_species_aliases(registry, "gas", alias_species, species) diff --git a/yt/fields/tests/test_fields.py b/yt/fields/tests/test_fields.py index 55acd2aba4d..fe85784eb39 100644 --- a/yt/fields/tests/test_fields.py +++ b/yt/fields/tests/test_fields.py @@ -62,7 +62,7 @@ class TestFieldAccess: def __init__(self, field_name, ds, nprocs): # Note this should be a field name self.field_name = field_name - self.description = "Accessing_%s_%s" % (field_name, nprocs) + self.description = f"Accessing_{field_name}_{nprocs}" self.nprocs = nprocs self.ds = ds @@ -184,7 +184,7 @@ def test_all_fields(): continue for nprocs in [1, 4, 8]: - test_all_fields.__name__ = "%s_%s" % (field, nprocs) + test_all_fields.__name__ = f"{field}_{nprocs}" yield TestFieldAccess(field, datasets[nprocs], nprocs) diff --git a/yt/fields/tests/test_fields_plugins.py b/yt/fields/tests/test_fields_plugins.py index 0cdacfd25a6..d00493d0e9c 100644 --- a/yt/fields/tests/test_fields_plugins.py +++ b/yt/fields/tests/test_fields_plugins.py @@ -72,7 +72,7 @@ def tearDownClass(cls): def testCustomField(self): plugin_file = os.path.join(CONFIG_DIR, ytcfg.get("yt", "pluginfilename")) - msg = "INFO:yt:Loading plugins from %s" % plugin_file + msg = f"INFO:yt:Loading plugins from {plugin_file}" with self.assertLogs("yt", level="INFO") as cm: yt.enable_plugins() diff --git a/yt/fields/tests/test_sph_fields.py b/yt/fields/tests/test_sph_fields.py index 51f07a8cfd1..d4ad16a323e 100644 --- a/yt/fields/tests/test_sph_fields.py +++ b/yt/fields/tests/test_sph_fields.py @@ -51,13 +51,13 @@ def sph_fields_validate(ds_fn): assert_array_almost_equal(gas_field, part_field) npart = ds.particle_type_counts[ds._sph_ptypes[0]] - err_msg = "Field %s is not the correct shape" % (gf,) + err_msg = f"Field {gf} is not the correct shape" assert_equal(npart, gas_field.shape[0], err_msg=err_msg) dd = ds.r[0.4:0.6, 0.4:0.6, 0.4:0.6] for i, ax in enumerate("xyz"): - dd.set_field_parameter("cp_%s_vec" % (ax,), yt.YTArray([1, 1, 1])) + dd.set_field_parameter(f"cp_{ax}_vec", yt.YTArray([1, 1, 1])) dd.set_field_parameter("axis", i) dd.set_field_parameter("omega_baryon", 0.3) diff --git a/yt/fields/tests/test_vector_fields.py b/yt/fields/tests/test_vector_fields.py index 15e09970319..6d65a89d335 100644 --- a/yt/fields/tests/test_vector_fields.py +++ b/yt/fields/tests/test_vector_fields.py @@ -63,20 +63,20 @@ def compare_vector_conversions(data_source): for i, d in enumerate("xyz"): assert_allclose_units( - data_source["velocity_%s" % d] - bulk_velocity[i], - 
data_source["relative_velocity_%s" % d], + data_source[f"velocity_{d}"] - bulk_velocity[i], + data_source[f"relative_velocity_{d}"], ) for i, ax in enumerate("xyz"): data_source.set_field_parameter("axis", i) data_source.clear_data() assert_allclose_units( - data_source["velocity_los"], data_source["relative_velocity_%s" % ax] + data_source["velocity_los"], data_source[f"relative_velocity_{ax}"] ) for i, ax in enumerate("xyz"): prj = data_source.ds.proj("velocity_los", i, weight_field="density") - assert_allclose_units(prj["velocity_los"], prj["velocity_%s" % ax]) + assert_allclose_units(prj["velocity_los"], prj[f"velocity_{ax}"]) data_source.clear_data() ax = [0.1, 0.2, -0.3] diff --git a/yt/fields/vector_operations.py b/yt/fields/vector_operations.py index 286ea35f8a0..3a535b39568 100644 --- a/yt/fields/vector_operations.py +++ b/yt/fields/vector_operations.py @@ -16,8 +16,8 @@ def get_bulk(data, basename, unit): - if data.has_field_parameter("bulk_%s" % basename): - bulk = data.get_field_parameter("bulk_%s" % basename) + if data.has_field_parameter(f"bulk_{basename}"): + bulk = data.get_field_parameter(f"bulk_{basename}") else: bulk = [0, 0, 0] * unit return bulk @@ -35,26 +35,26 @@ def create_magnitude_field( axis_order = registry.ds.coordinates.axis_order - field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in axis_order] + field_components = [(ftype, f"{basename}_{ax}") for ax in axis_order] if sampling_type is None: sampling_type = "local" def _magnitude(field, data): fn = field_components[0] - if data.has_field_parameter("bulk_%s" % basename): - fn = (fn[0], "relative_%s" % fn[1]) + if data.has_field_parameter(f"bulk_{basename}"): + fn = (fn[0], f"relative_{fn[1]}") d = data[fn] mag = (d) ** 2 for idim in range(1, registry.ds.dimensionality): fn = field_components[idim] - if data.has_field_parameter("bulk_%s" % basename): - fn = (fn[0], "relative_%s" % fn[1]) + if data.has_field_parameter(f"bulk_{basename}"): + fn = (fn[0], f"relative_{fn[1]}") mag += (data[fn]) ** 2 return np.sqrt(mag) registry.add_field( - (ftype, "%s_magnitude" % basename), + (ftype, f"{basename}_magnitude"), sampling_type=sampling_type, function=_magnitude, units=field_units, @@ -68,7 +68,7 @@ def create_relative_field( axis_order = registry.ds.coordinates.axis_order - field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in axis_order] + field_components = [(ftype, f"{basename}_{ax}") for ax in axis_order] def relative_vector(ax): def _relative_vector(field, data): @@ -81,7 +81,7 @@ def _relative_vector(field, data): for d in axis_order: registry.add_field( - (ftype, "relative_%s_%s" % (basename, d)), + (ftype, f"relative_{basename}_{d}"), sampling_type="local", function=relative_vector(d), units=field_units, @@ -93,15 +93,15 @@ def create_los_field(registry, basename, field_units, ftype="gas", slice_info=No axis_order = registry.ds.coordinates.axis_order validators = [ - ValidateParameter("bulk_%s" % basename), + ValidateParameter(f"bulk_{basename}"), ValidateParameter("axis", {"axis": [0, 1, 2]}), ] - field_comps = [(ftype, "%s_%s" % (basename, ax)) for ax in axis_order] + field_comps = [(ftype, f"{basename}_{ax}") for ax in axis_order] def _los_field(field, data): - if data.has_field_parameter("bulk_%s" % basename): - fns = [(fc[0], "relative_%s" % fc[1]) for fc in field_comps] + if data.has_field_parameter(f"bulk_{basename}"): + fns = [(fc[0], f"relative_{fc[1]}") for fc in field_comps] else: fns = field_comps ax = data.get_field_parameter("axis") @@ -116,7 +116,7 @@ def _los_field(field, 
data): return ret registry.add_field( - (ftype, "%s_los" % basename), + (ftype, f"{basename}_los"), sampling_type="local", function=_los_field, units=field_units, @@ -130,12 +130,12 @@ def create_squared_field( axis_order = registry.ds.coordinates.axis_order - field_components = [(ftype, "%s_%s" % (basename, ax)) for ax in axis_order] + field_components = [(ftype, f"{basename}_{ax}") for ax in axis_order] def _squared(field, data): fn = field_components[0] - if data.has_field_parameter("bulk_%s" % basename): - fn = (fn[0], "relative_%s" % fn[1]) + if data.has_field_parameter(f"bulk_{basename}"): + fn = (fn[0], f"relative_{fn[1]}") squared = data[fn] * data[fn] for idim in range(1, registry.ds.dimensionality): fn = field_components[idim] @@ -143,7 +143,7 @@ def _squared(field, data): return squared registry.add_field( - (ftype, "%s_squared" % basename), + (ftype, f"{basename}_squared"), sampling_type="local", function=_squared, units=field_units, @@ -167,7 +167,7 @@ def create_vector_fields(registry, basename, field_units, ftype="gas", slice_inf else: sl_left, sl_right, div_fac = slice_info - xn, yn, zn = [(ftype, "%s_%s" % (basename, ax)) for ax in "xyz"] + xn, yn, zn = [(ftype, f"{basename}_{ax}") for ax in "xyz"] # Is this safe? if registry.ds.dimensionality < 3: @@ -181,7 +181,7 @@ def create_vector_fields(registry, basename, field_units, ftype="gas", slice_inf field_units, ftype=ftype, slice_info=slice_info, - validators=[ValidateParameter("bulk_%s" % basename)], + validators=[ValidateParameter(f"bulk_{basename}")], ) create_magnitude_field( @@ -190,7 +190,7 @@ def create_vector_fields(registry, basename, field_units, ftype="gas", slice_inf field_units, ftype=ftype, slice_info=slice_info, - validators=[ValidateParameter("bulk_%s" % basename)], + validators=[ValidateParameter(f"bulk_{basename}")], ) if not is_curvilinear(registry.ds.geometry): @@ -204,7 +204,7 @@ def _spherical_radius_component(field, data): """ normal = data.get_field_parameter("normal") vectors = obtain_relative_velocity_vector( - data, (xn, yn, zn), "bulk_%s" % basename + data, (xn, yn, zn), f"bulk_{basename}" ) theta = data["index", "spherical_theta"] phi = data["index", "spherical_phi"] @@ -215,14 +215,14 @@ def _spherical_radius_component(field, data): return rv registry.add_field( - (ftype, "%s_spherical_radius" % basename), + (ftype, f"{basename}_spherical_radius"), sampling_type="local", function=_spherical_radius_component, units=field_units, validators=[ ValidateParameter("normal"), ValidateParameter("center"), - ValidateParameter("bulk_%s" % basename), + ValidateParameter(f"bulk_{basename}"), ], ) create_los_field( @@ -230,19 +230,19 @@ def _spherical_radius_component(field, data): ) def _radial(field, data): - return data[ftype, "%s_spherical_radius" % basename] + return data[ftype, f"{basename}_spherical_radius"] def _radial_absolute(field, data): - return np.abs(data[ftype, "%s_spherical_radius" % basename]) + return np.abs(data[ftype, f"{basename}_spherical_radius"]) def _tangential(field, data): return np.sqrt( - data[ftype, "%s_spherical_theta" % basename] ** 2.0 - + data[ftype, "%s_spherical_phi" % basename] ** 2.0 + data[ftype, f"{basename}_spherical_theta"] ** 2.0 + + data[ftype, f"{basename}_spherical_phi"] ** 2.0 ) registry.add_field( - (ftype, "radial_%s" % basename), + (ftype, f"radial_{basename}"), sampling_type="local", function=_radial, units=field_units, @@ -250,14 +250,14 @@ def _tangential(field, data): ) registry.add_field( - (ftype, "radial_%s_absolute" % basename), + (ftype, 
f"radial_{basename}_absolute"), sampling_type="local", function=_radial_absolute, units=field_units, ) registry.add_field( - (ftype, "tangential_%s" % basename), + (ftype, f"tangential_{basename}"), sampling_type="local", function=_tangential, units=field_units, @@ -271,21 +271,21 @@ def _spherical_theta_component(field, data): """ normal = data.get_field_parameter("normal") vectors = obtain_relative_velocity_vector( - data, (xn, yn, zn), "bulk_%s" % basename + data, (xn, yn, zn), f"bulk_{basename}" ) theta = data["index", "spherical_theta"] phi = data["index", "spherical_phi"] return get_sph_theta_component(vectors, theta, phi, normal) registry.add_field( - (ftype, "%s_spherical_theta" % basename), + (ftype, f"{basename}_spherical_theta"), sampling_type="local", function=_spherical_theta_component, units=field_units, validators=[ ValidateParameter("normal"), ValidateParameter("center"), - ValidateParameter("bulk_%s" % basename), + ValidateParameter(f"bulk_{basename}"), ], ) @@ -297,36 +297,36 @@ def _spherical_phi_component(field, data): """ normal = data.get_field_parameter("normal") vectors = obtain_relative_velocity_vector( - data, (xn, yn, zn), "bulk_%s" % basename + data, (xn, yn, zn), f"bulk_{basename}" ) phi = data["index", "spherical_phi"] return get_sph_phi_component(vectors, phi, normal) registry.add_field( - (ftype, "%s_spherical_phi" % basename), + (ftype, f"{basename}_spherical_phi"), sampling_type="local", function=_spherical_phi_component, units=field_units, validators=[ ValidateParameter("normal"), ValidateParameter("center"), - ValidateParameter("bulk_%s" % basename), + ValidateParameter(f"bulk_{basename}"), ], ) def _cp_vectors(ax): def _cp_val(field, data): - vec = data.get_field_parameter("cp_%s_vec" % (ax)) - tr = data[xn[0], "relative_%s" % xn[1]] * vec.d[0] - tr += data[yn[0], "relative_%s" % yn[1]] * vec.d[1] - tr += data[zn[0], "relative_%s" % zn[1]] * vec.d[2] + vec = data.get_field_parameter(f"cp_{ax}_vec") + tr = data[xn[0], f"relative_{xn[1]}"] * vec.d[0] + tr += data[yn[0], f"relative_{yn[1]}"] * vec.d[1] + tr += data[zn[0], f"relative_{zn[1]}"] * vec.d[2] return tr return _cp_val for ax in "xyz": registry.add_field( - (ftype, "cutting_plane_%s_%s" % (basename, ax)), + (ftype, f"cutting_plane_{basename}_{ax}"), sampling_type="local", function=_cp_vectors(ax), units=field_units, @@ -334,34 +334,34 @@ def _cp_val(field, data): def _divergence(field, data): ds = div_fac * just_one(data["index", "dx"]) - f = data[xn[0], "relative_%s" % xn[1]][sl_right, 1:-1, 1:-1] / ds - f -= data[xn[0], "relative_%s" % xn[1]][sl_left, 1:-1, 1:-1] / ds + f = data[xn[0], f"relative_{xn[1]}"][sl_right, 1:-1, 1:-1] / ds + f -= data[xn[0], f"relative_{xn[1]}"][sl_left, 1:-1, 1:-1] / ds ds = div_fac * just_one(data["index", "dy"]) - f += data[yn[0], "relative_%s" % yn[1]][1:-1, sl_right, 1:-1] / ds - f -= data[yn[0], "relative_%s" % yn[1]][1:-1, sl_left, 1:-1] / ds + f += data[yn[0], f"relative_{yn[1]}"][1:-1, sl_right, 1:-1] / ds + f -= data[yn[0], f"relative_{yn[1]}"][1:-1, sl_left, 1:-1] / ds ds = div_fac * just_one(data["index", "dz"]) - f += data[zn[0], "relative_%s" % zn[1]][1:-1, 1:-1, sl_right] / ds - f -= data[zn[0], "relative_%s" % zn[1]][1:-1, 1:-1, sl_left] / ds + f += data[zn[0], f"relative_{zn[1]}"][1:-1, 1:-1, sl_right] / ds + f -= data[zn[0], f"relative_{zn[1]}"][1:-1, 1:-1, sl_left] / ds new_field = data.ds.arr(np.zeros(data[xn].shape, dtype=np.float64), f.units) new_field[1:-1, 1:-1, 1:-1] = f return new_field def _divergence_abs(field, data): - return 
np.abs(data[ftype, "%s_divergence" % basename]) + return np.abs(data[ftype, f"{basename}_divergence"]) field_units = Unit(field_units, registry=registry.ds.unit_registry) div_units = field_units / registry.ds.unit_system["length"] registry.add_field( - (ftype, "%s_divergence" % basename), + (ftype, f"{basename}_divergence"), sampling_type="local", function=_divergence, units=div_units, - validators=[ValidateSpatial(1), ValidateParameter("bulk_%s" % basename)], + validators=[ValidateSpatial(1), ValidateParameter(f"bulk_{basename}")], ) registry.add_field( - (ftype, "%s_divergence_absolute" % basename), + (ftype, f"{basename}_divergence_absolute"), sampling_type="local", function=_divergence_abs, units=div_units, @@ -369,13 +369,13 @@ def _divergence_abs(field, data): def _tangential_over_magnitude(field, data): tr = ( - data[ftype, "tangential_%s" % basename] - / data[ftype, "%s_magnitude" % basename] + data[ftype, f"tangential_{basename}"] + / data[ftype, f"{basename}_magnitude"] ) return np.abs(tr) registry.add_field( - (ftype, "tangential_over_%s_magnitude" % basename), + (ftype, f"tangential_over_{basename}_magnitude"), sampling_type="local", function=_tangential_over_magnitude, take_log=False, @@ -389,13 +389,13 @@ def _cylindrical_radius_component(field, data): """ normal = data.get_field_parameter("normal") vectors = obtain_relative_velocity_vector( - data, (xn, yn, zn), "bulk_%s" % basename + data, (xn, yn, zn), f"bulk_{basename}" ) theta = data["index", "cylindrical_theta"] return get_cyl_r_component(vectors, theta, normal) registry.add_field( - (ftype, "%s_cylindrical_radius" % basename), + (ftype, f"{basename}_cylindrical_radius"), sampling_type="local", function=_cylindrical_radius_component, units=field_units, @@ -404,10 +404,10 @@ def _cylindrical_radius_component(field, data): def _cylindrical_radial(field, data): """This field is deprecated and will be removed in a future version""" - return data[ftype, "%s_cylindrical_radius" % basename] + return data[ftype, f"{basename}_cylindrical_radius"] registry.add_field( - (ftype, "cylindrical_radial_%s" % basename), + (ftype, f"cylindrical_radial_{basename}"), sampling_type="local", function=_cylindrical_radial, units=field_units, @@ -415,10 +415,10 @@ def _cylindrical_radial(field, data): def _cylindrical_radial_absolute(field, data): """This field is deprecated and will be removed in a future version""" - return np.abs(data[ftype, "%s_cylindrical_radius" % basename]) + return np.abs(data[ftype, f"{basename}_cylindrical_radius"]) registry.add_field( - (ftype, "cylindrical_radial_%s_absolute" % basename), + (ftype, f"cylindrical_radial_{basename}_absolute"), sampling_type="local", function=_cylindrical_radial_absolute, units=field_units, @@ -433,41 +433,41 @@ def _cylindrical_theta_component(field, data): """ normal = data.get_field_parameter("normal") vectors = obtain_relative_velocity_vector( - data, (xn, yn, zn), "bulk_%s" % basename + data, (xn, yn, zn), f"bulk_{basename}" ) theta = data["index", "cylindrical_theta"].copy() theta = np.tile(theta, (3,) + (1,) * len(theta.shape)) return get_cyl_theta_component(vectors, theta, normal) registry.add_field( - (ftype, "%s_cylindrical_theta" % basename), + (ftype, f"{basename}_cylindrical_theta"), sampling_type="local", function=_cylindrical_theta_component, units=field_units, validators=[ ValidateParameter("normal"), ValidateParameter("center"), - ValidateParameter("bulk_%s" % basename), + ValidateParameter(f"bulk_{basename}"), ], ) def _cylindrical_tangential(field, data): """This 
field is deprecated and will be removed in a future release""" - return data[ftype, "%s_cylindrical_theta" % basename] + return data[ftype, f"{basename}_cylindrical_theta"] def _cylindrical_tangential_absolute(field, data): """This field is deprecated and will be removed in a future release""" - return np.abs(data[ftype, "cylindrical_tangential_%s" % basename]) + return np.abs(data[ftype, f"cylindrical_tangential_{basename}"]) registry.add_field( - (ftype, "cylindrical_tangential_%s" % basename), + (ftype, f"cylindrical_tangential_{basename}"), sampling_type="local", function=_cylindrical_tangential, units=field_units, ) registry.add_field( - (ftype, "cylindrical_tangential_%s_absolute" % basename), + (ftype, f"cylindrical_tangential_{basename}_absolute"), sampling_type="local", function=_cylindrical_tangential_absolute, units=field_units, @@ -481,19 +481,19 @@ def _cylindrical_z_component(field, data): """ normal = data.get_field_parameter("normal") vectors = obtain_relative_velocity_vector( - data, (xn, yn, zn), "bulk_%s" % basename + data, (xn, yn, zn), f"bulk_{basename}" ) return get_cyl_z_component(vectors, normal) registry.add_field( - (ftype, "%s_cylindrical_z" % basename), + (ftype, f"{basename}_cylindrical_z"), sampling_type="local", function=_cylindrical_z_component, units=field_units, validators=[ ValidateParameter("normal"), ValidateParameter("center"), - ValidateParameter("bulk_%s" % basename), + ValidateParameter(f"bulk_{basename}"), ], ) @@ -502,38 +502,38 @@ def _cylindrical_z_component(field, data): def _cartesian_x(field, data): if registry.ds.geometry == "polar": - return data["%s_r" % basename] * np.cos(data["theta"]) + return data[f"{basename}_r"] * np.cos(data["theta"]) elif registry.ds.geometry == "cylindrical": if data.ds.dimensionality == 2: - return data["%s_r" % basename] + return data[f"{basename}_r"] elif data.ds.dimensionality == 3: - return data["%s_r" % basename] * np.cos(data["theta"]) - data[ - "%s_theta" % basename + return data[f"{basename}_r"] * np.cos(data["theta"]) - data[ + f"{basename}_theta" ] * np.sin(data["theta"]) elif registry.ds.geometry == "spherical": if data.ds.dimensionality == 2: - return data["%s_r" % basename] * np.sin(data["theta"]) + data[ - "%s_theta" % basename + return data[f"{basename}_r"] * np.sin(data["theta"]) + data[ + f"{basename}_theta" ] * np.cos(data["theta"]) elif data.ds.dimensionality == 3: return ( - data["%s_r" % basename] + data[f"{basename}_r"] * np.sin(data["theta"]) * np.cos(data["phi"]) - + data["%s_theta" % basename] + + data[f"{basename}_theta"] * np.cos(data["theta"]) * np.cos(["phi"]) - - data["%s_phi" % basename] * np.sin(data["phi"]) + - data[f"{basename}_phi"] * np.sin(data["phi"]) ) # it's redundant to define a cartesian x field for 1D data if registry.ds.dimensionality > 1: registry.add_field( - (ftype, "%s_cartesian_x" % basename), + (ftype, f"{basename}_cartesian_x"), sampling_type="local", function=_cartesian_x, units=field_units, @@ -544,37 +544,37 @@ def _cartesian_y(field, data): if registry.ds.geometry == "polar": - return data["%s_r" % basename] * np.sin(data["theta"]) + return data[f"{basename}_r"] * np.sin(data["theta"]) elif registry.ds.geometry == "cylindrical": if data.ds.dimensionality == 2: - return data["%s_z" % basename] + return data[f"{basename}_z"] elif data.ds.dimensionality == 3: - return data["%s_r" % basename] * np.sin(data["theta"]) + data[ - "%s_theta" % basename + return data[f"{basename}_r"] * np.sin(data["theta"]) + data[ + f"{basename}_theta" ] * np.cos(data["theta"]) elif 
registry.ds.geometry == "spherical": if data.ds.dimensionality == 2: - return data["%s_r" % basename] * np.cos(data["theta"]) - data[ - "%s_theta" % basename + return data[f"{basename}_r"] * np.cos(data["theta"]) - data[ + f"{basename}_theta" ] * np.sin(data["theta"]) elif data.ds.dimensionality == 3: return ( - data["%s_r" % basename] + data[f"{basename}_r"] * np.sin(data["theta"]) * np.sin(data["phi"]) - + data["%s_theta" % basename] + + data[f"{basename}_theta"] * np.cos(data["theta"]) * np.sin(["phi"]) - + data["%s_phi" % basename] * np.cos(data["phi"]) + + data[f"{basename}_phi"] * np.cos(data["phi"]) ) if registry.ds.dimensionality >= 2: registry.add_field( - (ftype, "%s_cartesian_y" % basename), + (ftype, f"{basename}_cartesian_y"), sampling_type="local", function=_cartesian_y, units=field_units, @@ -584,15 +584,15 @@ def _cartesian_y(field, data): def _cartesian_z(field, data): if registry.ds.geometry == "cylindrical": - return data["%s_z" % basename] + return data[f"{basename}_z"] elif registry.ds.geometry == "spherical": - return data["%s_r" % basename] * np.cos(data["theta"]) - data[ - "%s_theta" % basename + return data[f"{basename}_r"] * np.cos(data["theta"]) - data[ + f"{basename}_theta" ] * np.sin(data["theta"]) if registry.ds.dimensionality == 3: registry.add_field( - (ftype, "%s_cartesian_z" % basename), + (ftype, f"{basename}_cartesian_z"), sampling_type="local", function=_cartesian_z, units=field_units, @@ -641,7 +641,7 @@ def _averaged_field(field, data): return new_field2 registry.add_field( - (ftype, "averaged_%s" % basename), + (ftype, f"averaged_{basename}"), sampling_type="cell", function=_averaged_field, units=field_units, diff --git a/yt/fields/xray_emission_fields.py b/yt/fields/xray_emission_fields.py index 9fb3ecb4a23..1321be535d8 100644 --- a/yt/fields/xray_emission_fields.py +++ b/yt/fields/xray_emission_fields.py @@ -41,14 +41,14 @@ def __init__(self, lower, upper): self.upper = upper def __str__(self): - return "Energy bounds are %e to %e keV." % (self.lower, self.upper) + return f"Energy bounds are {self.lower:e} to {self.upper:e} keV." class ObsoleteDataException(YTException): def __init__(self, table_type): data_file = "%s_emissivity_v%d.h5" % (table_type, data_version[table_type]) self.msg = "X-ray emissivity data is out of date.\n" - self.msg += "Download the latest data from %s/%s." % (data_url, data_file) + self.msg += f"Download the latest data from {data_url}/{data_file}." def __str__(self): return self.msg @@ -83,7 +83,7 @@ class XrayEmissivityIntegrator: def __init__(self, table_type, redshift=0.0, data_dir=None, use_metals=True): filename = _get_data_file(table_type, data_dir=data_dir) - only_on_root(mylog.info, "Loading emissivity data from %s." % filename) + only_on_root(mylog.info, f"Loading emissivity data from {filename}.") in_file = h5py.File(filename, mode="r") if "info" in in_file.attrs: only_on_root(mylog.info, parse_h5_attr(in_file, "info")) @@ -109,7 +109,7 @@ def __init__(self, table_type, redshift=0.0, data_dir=None, use_metals=True): self.redshift = redshift def get_interpolator(self, data_type, e_min, e_max, energy=True): - data = getattr(self, "emissivity_%s" % data_type) + data = getattr(self, f"emissivity_{data_type}") if not energy: data = data[..., :] / self.emid.v e_min = YTQuantity(e_min, "keV") * (1.0 + self.redshift) @@ -218,7 +218,7 @@ def add_xray_emissivity_field( metallicity = ds._get_field_info(*metallicity) except YTFieldNotFound: raise RuntimeError( - "Your dataset does not have a {} field! 
".format(metallicity) + f"Your dataset does not have a {metallicity} field! " + "Perhaps you should specify a constant metallicity instead?" ) @@ -263,7 +263,7 @@ def _emissivity_field(field, data): return data[ftype, "norm_field"] * YTArray(my_emissivity, "erg*cm**3/s") - emiss_name = (ftype, "xray_emissivity_%s_%s_keV" % (e_min, e_max)) + emiss_name = (ftype, f"xray_emissivity_{e_min}_{e_max}_keV") ds.add_field( emiss_name, function=_emissivity_field, @@ -275,7 +275,7 @@ def _emissivity_field(field, data): def _luminosity_field(field, data): return data[emiss_name] * data[ftype, "mass"] / data[ftype, "density"] - lum_name = (ftype, "xray_luminosity_%s_%s_keV" % (e_min, e_max)) + lum_name = (ftype, f"xray_luminosity_{e_min}_{e_max}_keV") ds.add_field( lum_name, function=_luminosity_field, @@ -300,7 +300,7 @@ def _photon_emissivity_field(field, data): return data[ftype, "norm_field"] * YTArray(my_emissivity, "photons*cm**3/s") - phot_name = (ftype, "xray_photon_emissivity_%s_%s_keV" % (e_min, e_max)) + phot_name = (ftype, f"xray_photon_emissivity_{e_min}_{e_max}_keV") ds.add_field( phot_name, function=_photon_emissivity_field, @@ -343,7 +343,7 @@ def _photon_emissivity_field(field, data): "rad**-2", ) - ei_name = (ftype, "xray_intensity_%s_%s_keV" % (e_min, e_max)) + ei_name = (ftype, f"xray_intensity_{e_min}_{e_max}_keV") def _intensity_field(field, data): I = dist_fac * data[emiss_name] @@ -357,7 +357,7 @@ def _intensity_field(field, data): units="erg/cm**3/s/arcsec**2", ) - i_name = (ftype, "xray_photon_intensity_%s_%s_keV" % (e_min, e_max)) + i_name = (ftype, f"xray_photon_intensity_{e_min}_{e_max}_keV") def _photon_intensity_field(field, data): I = (1.0 + redshift) * dist_fac * data[phot_name] diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 87ebddad7a9..04bbf72960e 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -290,6 +290,6 @@ def _set_halo_properties(self): ds = self.halo_ds # Add position, mass, velocity member functions for attr_name in ("mass", "position", "velocity"): - setattr(self, attr_name, ds.r["halos", "particle_%s" % attr_name][ihalo]) + setattr(self, attr_name, ds.r["halos", f"particle_{attr_name}"][ihalo]) # Add members self.member_ids = self.halo_ds.index.io.members(ihalo).astype(np.int64) diff --git a/yt/frontends/adaptahop/fields.py b/yt/frontends/adaptahop/fields.py index f1e193c1ecd..8ab25645e28 100644 --- a/yt/frontends/adaptahop/fields.py +++ b/yt/frontends/adaptahop/fields.py @@ -58,14 +58,14 @@ def generate_pos_field(d): shift = self.ds.domain_width[0] / 2 def closure(field, data): - return data["halos", "raw_position_%s" % d] + shift + return data["halos", f"raw_position_{d}"] + shift return closure for k in "xyz": fun = generate_pos_field(k) self.add_field( - ("halos", "particle_position_%s" % k), + ("halos", f"particle_position_{k}"), sampling_type="particle", function=fun, units="Mpc", diff --git a/yt/frontends/adaptahop/io.py b/yt/frontends/adaptahop/io.py index 0cc834a5f92..2a7d0419a38 100644 --- a/yt/frontends/adaptahop/io.py +++ b/yt/frontends/adaptahop/io.py @@ -82,7 +82,7 @@ def iterate_over_attributes(attr_list): continue ptype = "halos" field_list0 = sorted(ptf[ptype], key=_find_attr_position) - field_list_pos = ["raw_position_%s" % k for k in "xyz"] + field_list_pos = [f"raw_position_{k}" for k in "xyz"] field_list = sorted( set(field_list0 + field_list_pos), key=_find_attr_position ) diff --git a/yt/frontends/ahf/data_structures.py 
b/yt/frontends/ahf/data_structures.py index 903f8851c17..0642cdffce7 100644 --- a/yt/frontends/ahf/data_structures.py +++ b/yt/frontends/ahf/data_structures.py @@ -44,7 +44,7 @@ def _read_particle_positions(self, ptype, f=None): halos = self.read_data(usecols=["Xc", "Yc", "Zc"]) pos = np.empty((halos.size, 3), dtype="float64") for i, ax in enumerate("XYZ"): - pos[:, i] = halos["%sc" % ax].astype("float64") + pos[:, i] = halos[f"{ax}c"].astype("float64") return pos diff --git a/yt/frontends/amrvac/fields.py b/yt/frontends/amrvac/fields.py index 04c235ab4e9..edc9d7338a9 100644 --- a/yt/frontends/amrvac/fields.py +++ b/yt/frontends/amrvac/fields.py @@ -36,7 +36,7 @@ def _velocity(field, data, idir, prefix=None): if prefix is None: prefix = "" moment = data["gas", "%smoment_%d" % (prefix, idir)] - rho = data["gas", "%sdensity" % prefix] + rho = data["gas", f"{prefix}density"] mask1 = rho == 0 if mask1.any(): @@ -109,7 +109,7 @@ def _setup_velocity_fields(self, idust=None): velocity_fn = functools.partial(_velocity, idir=idir, prefix=dust_label) functools.update_wrapper(velocity_fn, _velocity) self.add_field( - ("gas", "%svelocity_%s" % (dust_label, alias)), + ("gas", f"{dust_label}velocity_{alias}"), function=velocity_fn, units=us["velocity"], dimensions=dimensions.velocity, @@ -117,11 +117,11 @@ def _setup_velocity_fields(self, idust=None): ) self.alias( ("gas", "%svelocity_%d" % (dust_label, idir)), - ("gas", "%svelocity_%s" % (dust_label, alias)), + ("gas", f"{dust_label}velocity_{alias}"), units=us["velocity"], ) self.alias( - ("gas", "%smoment_%s" % (dust_label, alias)), + ("gas", f"{dust_label}moment_{alias}"), ("gas", "%smoment_%d" % (dust_label, idir)), units=us["density"] * us["velocity"], ) @@ -169,7 +169,7 @@ def dust_to_gas_ratio(field, data): def setup_fluid_fields(self): - setup_magnetic_field_aliases(self, "amrvac", ["mag%s" % ax for ax in "xyz"]) + setup_magnetic_field_aliases(self, "amrvac", [f"mag{ax}" for ax in "xyz"]) self._setup_velocity_fields() # gas velocities self._setup_dust_fields() # dust derived fields (including velocities) @@ -194,9 +194,9 @@ def _kinetic_energy_density(field, data): def _magnetic_energy_density(field, data): emag = 0.5 * data["gas", "magnetic_1"] ** 2 for idim in "23": - if not ("amrvac", "b%s" % idim) in self.field_list: + if not ("amrvac", f"b{idim}") in self.field_list: break - emag += 0.5 * data["gas", "magnetic_%s" % idim] ** 2 + emag += 0.5 * data["gas", f"magnetic_{idim}"] ** 2 # important note: in AMRVAC the magnetic field is defined in units where mu0 = 1, # such that # Emag = 0.5*B**2 instead of Emag = 0.5*B**2 / mu0 diff --git a/yt/frontends/api.py b/yt/frontends/api.py index 3e811bb4bac..982d664742a 100644 --- a/yt/frontends/api.py +++ b/yt/frontends/api.py @@ -46,7 +46,7 @@ class _frontend_container: def __init__(self): for frontend in _frontends: - _mod = "yt.frontends.%s.api" % frontend + _mod = f"yt.frontends.{frontend}.api" setattr(self, frontend, importlib.import_module(_mod)) setattr(self, "api", importlib.import_module("yt.frontends.api")) setattr(self, "__name__", "yt.frontends.api") diff --git a/yt/frontends/arepo/data_structures.py b/yt/frontends/arepo/data_structures.py index fc6ca2a877d..d9e121450c9 100644 --- a/yt/frontends/arepo/data_structures.py +++ b/yt/frontends/arepo/data_structures.py @@ -82,6 +82,6 @@ def _set_code_unit_attributes(self): "gauss" ) if self.cosmological_simulation: - self.magnetic_unit = self.quan(munit.value, "%s/a**2" % munit.units) + self.magnetic_unit = self.quan(munit.value, 
f"{munit.units}/a**2") else: self.magnetic_unit = munit diff --git a/yt/frontends/arepo/fields.py b/yt/frontends/arepo/fields.py index 98cdd5a4b86..e442c8a7793 100644 --- a/yt/frontends/arepo/fields.py +++ b/yt/frontends/arepo/fields.py @@ -99,7 +99,7 @@ def _h_p1_fraction(field, data): for species in ["H", "H_p0", "H_p1"]: for suf in ["_density", "_number_density"]: - field = "%s%s" % (species, suf) + field = f"{species}{suf}" self.alias(("gas", field), (ptype, field)) self.alias(("gas", "H_nuclei_density"), ("gas", "H_number_density")) diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index b2bb623a5c2..980e20171c8 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -233,7 +233,7 @@ def _set_code_unit_attributes(self): mass = aM0 * 1.98892e33 self.cosmological_simulation = True - setdefaultattr(self, "mass_unit", self.quan(mass, "g*%s" % ng ** 3)) + setdefaultattr(self, "mass_unit", self.quan(mass, f"g*{ng ** 3}")) setdefaultattr(self, "length_unit", self.quan(box_proper, "Mpc")) setdefaultattr(self, "velocity_unit", self.quan(velocity, "cm/s")) setdefaultattr(self, "time_unit", self.length_unit / self.velocity_unit) @@ -374,7 +374,7 @@ def _is_valid(self, *args, **kwargs): Defined for the NMSU file naming scheme. This could differ for other formats. """ - f = "%s" % args[0] + f = f"{args[0]}" prefix, suffix = filename_pattern["amr"] if not os.path.isfile(f): return False @@ -515,7 +515,7 @@ def _set_code_unit_attributes(self): mass = aM0 * 1.98892e33 self.cosmological_simulation = True - self.mass_unit = self.quan(mass, "g*%s" % ng ** 3) + self.mass_unit = self.quan(mass, f"g*{ng ** 3}") self.length_unit = self.quan(box_proper, "Mpc") self.velocity_unit = self.quan(velocity, "cm/s") self.time_unit = self.length_unit / self.velocity_unit @@ -672,7 +672,7 @@ def _is_valid(self, *args, **kwargs): Defined for the NMSU file naming scheme. This could differ for other formats. 
""" - f = "%s" % args[0] + f = f"{args[0]}" prefix, suffix = filename_pattern["particle_data"] if not os.path.isfile(f): return False diff --git a/yt/frontends/art/fields.py b/yt/frontends/art/fields.py index 81409da73f8..4a17492f48a 100644 --- a/yt/frontends/art/fields.py +++ b/yt/frontends/art/fields.py @@ -61,13 +61,13 @@ def _temperature(field, data): def _get_vel(axis): def velocity(field, data): - return data[("gas", "momentum_%s" % axis)] / data[("gas", "density")] + return data[("gas", f"momentum_{axis}")] / data[("gas", "density")] return velocity for ax in "xyz": self.add_field( - ("gas", "velocity_%s" % ax), + ("gas", f"velocity_{ax}"), sampling_type="cell", function=_get_vel(ax), units=unit_system["velocity"], @@ -190,7 +190,7 @@ def _specific_metal_density(field, data): for atom in atoms: self.add_field( - ("gas", "%s_nuclei_mass_density" % atom), + ("gas", f"{atom}_nuclei_mass_density"), sampling_type="cell", function=_specific_metal_density_function(atom), units=unit_system["density"], diff --git a/yt/frontends/art/io.py b/yt/frontends/art/io.py index e9ac776efd0..b2fa6132b42 100644 --- a/yt/frontends/art/io.py +++ b/yt/frontends/art/io.py @@ -117,11 +117,11 @@ def _get_field(self, field): read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb ) for ax in "xyz": - if fname.startswith("particle_position_%s" % ax): + if fname.startswith(f"particle_position_{ax}"): dd = self.ds.domain_dimensions[0] off = 1.0 / dd tr[field] = rp(fields=[ax])[0] / dd - off - if fname.startswith("particle_velocity_%s" % ax): + if fname.startswith(f"particle_velocity_{ax}"): (tr[field],) = rp(["v" + ax]) if fname.startswith("particle_mass"): a = 0 @@ -229,12 +229,12 @@ def _get_field(self, field): read_particles, self.file_particle, self.Nrow, idxa=idxa, idxb=idxb ) for ax in "xyz": - if fname.startswith("particle_position_%s" % ax): + if fname.startswith(f"particle_position_{ax}"): # This is not the same as domain_dimensions dd = self.ds.parameters["ng"] off = 1.0 / dd tr[field] = rp(fields=[ax])[0] / dd - off - if fname.startswith("particle_velocity_%s" % ax): + if fname.startswith(f"particle_velocity_{ax}"): (tr[field],) = rp(["v" + ax]) if fname.startswith("particle_mass"): a = 0 @@ -513,7 +513,7 @@ def _read_child_mask_level(f, level_child_offsets, level, nLevel, nhydro_vars): nchem = 8 + 2 -dtyp = np.dtype(">i4,>i8,>i8" + ",>%sf4" % (nchem) + ",>%sf4" % (2) + ",>i4") +dtyp = np.dtype(">i4,>i8,>i8" + f",>{nchem}f4" + ",>%sf4" % (2) + ",>i4") def _read_child_level( diff --git a/yt/frontends/art/tests/test_outputs.py b/yt/frontends/art/tests/test_outputs.py index 2f6916c25d6..51f31f25ad9 100644 --- a/yt/frontends/art/tests/test_outputs.py +++ b/yt/frontends/art/tests/test_outputs.py @@ -63,8 +63,8 @@ def test_d9p(): ) for spnum in range(5): - npart_read = ad["specie%s" % spnum, "particle_type"].size - npart_header = ds.particle_type_counts["specie%s" % spnum] + npart_read = ad[f"specie{spnum}", "particle_type"].size + npart_header = ds.particle_type_counts[f"specie{spnum}"] if spnum == 3: # see issue 814 npart_read += 1 diff --git a/yt/frontends/artio/data_structures.py b/yt/frontends/artio/data_structures.py index 5a501a71ca7..5e4796d2708 100644 --- a/yt/frontends/artio/data_structures.py +++ b/yt/frontends/artio/data_structures.py @@ -120,7 +120,7 @@ def deposit(self, positions, fields=None, method=None, kernel_name="cubic"): # Here we perform our particle deposition. 
if fields is None: fields = [] - cls = getattr(particle_deposit, "deposit_%s" % method, None) + cls = getattr(particle_deposit, f"deposit_{method}", None) if cls is None: raise YTParticleDepositionNotImplemented(method) nz = self.nz @@ -203,8 +203,8 @@ def find_max_cell_location(self, field, finest_levels=3): mylog.debug("Searching for maximum value of %s", field) max_val, mx, my, mz = source.quantities["MaxLocation"](field) mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", max_val, mx, my, mz) - self.ds.parameters["Max%sValue" % (field)] = max_val - self.ds.parameters["Max%sPos" % (field)] = "%s" % ((mx, my, mz),) + self.ds.parameters[f"Max{field}Value"] = max_val + self.ds.parameters[f"Max{field}Pos"] = f"{mx, my, mz}" return max_val, np.array((mx, my, mz), dtype="float64") def _detect_output_fields(self): @@ -410,7 +410,7 @@ def _parse_parameter_file(self): if labels.count("N-BODY") > 1: for species, label in enumerate(labels): if label == "N-BODY": - labels[species] = "N-BODY_{}".format(species) + labels[species] = f"N-BODY_{species}" self.particle_types_raw = self.artio_parameters["particle_species_labels"] self.particle_types = tuple(self.particle_types_raw) diff --git a/yt/frontends/artio/fields.py b/yt/frontends/artio/fields.py index 68b734e4608..314f0293bc5 100644 --- a/yt/frontends/artio/fields.py +++ b/yt/frontends/artio/fields.py @@ -58,13 +58,13 @@ def setup_fluid_fields(self): def _get_vel(axis): def velocity(field, data): - return data["momentum_%s" % axis] / data["density"] + return data[f"momentum_{axis}"] / data["density"] return velocity for ax in "xyz": self.add_field( - ("gas", "velocity_%s" % ax), + ("gas", f"velocity_{ax}"), sampling_type="cell", function=_get_vel(ax), units=unit_system["velocity"], diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index fbd8a3241de..783730a6718 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -188,7 +188,7 @@ def _detect_output_fields(self): field = str23(splitup[1]) dtype = str23(splitup[-1]).lower() for ax in "xyz": - field_map[("athena", "%s_%s" % (field, ax))] = ( + field_map[("athena", f"{field}_{ax}")] = ( "vector", f.tell() - read_table_offset, dtype, @@ -237,18 +237,16 @@ def _parse_index(self): dataset_dir = dataset_dir[:-3] gridlistread = sglob( - os.path.join(dataset_dir, "id*/%s-id*%s" % (dname[4:-9], dname[-9:])) + os.path.join(dataset_dir, f"id*/{dname[4:-9]}-id*{dname[-9:]}") ) gridlistread.insert(0, self.index_filename) if "id0" in dname: gridlistread += sglob( - os.path.join( - dataset_dir, "id*/lev*/%s*-lev*%s" % (dname[4:-9], dname[-9:]) - ) + os.path.join(dataset_dir, f"id*/lev*/{dname[4:-9]}*-lev*{dname[-9:]}") ) else: gridlistread += sglob( - os.path.join(dataset_dir, "lev*/%s*-lev*%s" % (dname[:-9], dname[-9:])) + os.path.join(dataset_dir, f"lev*/{dname[:-9]}*-lev*{dname[-9:]}") ) ndots = dname.count(".") gridlistread = [ @@ -506,7 +504,7 @@ def __init__( ) self.filename = filename if storage_filename is None: - storage_filename = "%s.yt" % filename.split("/")[-1] + storage_filename = f"{filename.split('/')[-1]}.yt" self.storage_filename = storage_filename self.backup_filename = self.filename[:-4] + "_backup.gdf" # Unfortunately we now have to mandate that the index gets @@ -526,7 +524,7 @@ def _set_code_unit_attributes(self): if getattr(self, unit + "_unit", None) is not None: continue mylog.warning("Assuming 1.0 = 1.0 %s", cgs) - setattr(self, "%s_unit" % unit, self.quan(1.0, cgs)) + setattr(self, 
f"{unit}_unit", self.quan(1.0, cgs)) self.magnetic_unit = np.sqrt( 4 * np.pi * self.mass_unit / (self.time_unit ** 2 * self.length_unit) ) @@ -611,17 +609,15 @@ def _parse_parameter_file(self): dataset_dir = dataset_dir[:-3] gridlistread = sglob( - os.path.join(dataset_dir, "id*/%s-id*%s" % (dname[4:-9], dname[-9:])) + os.path.join(dataset_dir, f"id*/{dname[4:-9]}-id*{dname[-9:]}") ) if "id0" in dname: gridlistread += sglob( - os.path.join( - dataset_dir, "id*/lev*/%s*-lev*%s" % (dname[4:-9], dname[-9:]) - ) + os.path.join(dataset_dir, f"id*/lev*/{dname[4:-9]}*-lev*{dname[-9:]}") ) else: gridlistread += sglob( - os.path.join(dataset_dir, "lev*/%s*-lev*%s" % (dname[:-9], dname[-9:])) + os.path.join(dataset_dir, f"lev*/{dname[:-9]}*-lev*{dname[-9:]}") ) ndots = dname.count(".") gridlistread = [ diff --git a/yt/frontends/athena/fields.py b/yt/frontends/athena/fields.py index 62097b36e4d..f759558c126 100644 --- a/yt/frontends/athena/fields.py +++ b/yt/frontends/athena/fields.py @@ -9,7 +9,7 @@ def velocity_field(comp): def _velocity(field, data): - return data["athena", "momentum_%s" % comp] / data["athena", "density"] + return data["athena", f"momentum_{comp}"] / data["athena", "density"] return _velocity @@ -37,14 +37,14 @@ def setup_fluid_fields(self): unit_system = self.ds.unit_system # Add velocity fields for comp in "xyz": - vel_field = ("athena", "velocity_%s" % comp) - mom_field = ("athena", "momentum_%s" % comp) + vel_field = ("athena", f"velocity_{comp}") + mom_field = ("athena", f"momentum_{comp}") if vel_field in self.field_list: self.add_output_field( vel_field, sampling_type="cell", units="code_length/code_time" ) self.alias( - ("gas", "velocity_%s" % comp), + ("gas", f"velocity_{comp}"), vel_field, units=unit_system["velocity"], ) @@ -55,7 +55,7 @@ def setup_fluid_fields(self): units="code_mass/code_time/code_length**2", ) self.add_field( - ("gas", "velocity_%s" % comp), + ("gas", f"velocity_{comp}"), sampling_type="cell", function=velocity_field(comp), units=unit_system["velocity"], @@ -151,5 +151,5 @@ def _temperature(field, data): ) setup_magnetic_field_aliases( - self, "athena", ["cell_centered_B_%s" % ax for ax in "xyz"] + self, "athena", [f"cell_centered_B_{ax}" for ax in "xyz"] ) diff --git a/yt/frontends/athena/tests/test_outputs.py b/yt/frontends/athena/tests/test_outputs.py index fa179a20a9d..f0375cf6f50 100644 --- a/yt/frontends/athena/tests/test_outputs.py +++ b/yt/frontends/athena/tests/test_outputs.py @@ -107,8 +107,8 @@ def test_nprocs(): ) for ax in "xyz": assert_equal( - sp1.quantities.extrema("velocity_%s" % ax), - sp2.quantities.extrema("velocity_%s" % ax), + sp1.quantities.extrema(f"velocity_{ax}"), + sp2.quantities.extrema(f"velocity_{ax}"), ) assert_allclose_units( sp1.quantities.bulk_velocity(), sp2.quantities.bulk_velocity() diff --git a/yt/frontends/athena_pp/data_structures.py b/yt/frontends/athena_pp/data_structures.py index f41ee1912cd..238cca5fbd1 100644 --- a/yt/frontends/athena_pp/data_structures.py +++ b/yt/frontends/athena_pp/data_structures.py @@ -273,7 +273,7 @@ def __init__( ) self.filename = filename if storage_filename is None: - storage_filename = "%s.yt" % filename.split("/")[-1] + storage_filename = f"{filename.split('/')[-1]}.yt" self.storage_filename = storage_filename self.backup_filename = self.filename[:-4] + "_backup.gdf" @@ -294,7 +294,7 @@ def _set_code_unit_attributes(self): if getattr(self, unit + "_unit", None) is not None: continue mylog.warning("Assuming 1.0 = 1.0 %s", cgs) - setattr(self, "%s_unit" % unit, self.quan(1.0, 
cgs)) + setattr(self, f"{unit}_unit", self.quan(1.0, cgs)) self.magnetic_unit = np.sqrt( 4 * np.pi * self.mass_unit / (self.time_unit ** 2 * self.length_unit) diff --git a/yt/frontends/athena_pp/fields.py b/yt/frontends/athena_pp/fields.py index 378470c7184..666288c62fd 100644 --- a/yt/frontends/athena_pp/fields.py +++ b/yt/frontends/athena_pp/fields.py @@ -37,7 +37,7 @@ def setup_fluid_fields(self): vel_field, sampling_type="cell", units="code_length/code_time" ) self.alias( - ("gas", "%s_%s" % (vel_prefix, comp)), + ("gas", f"{vel_prefix}_{comp}"), vel_field, units=unit_system["velocity"], ) @@ -48,7 +48,7 @@ def setup_fluid_fields(self): units="code_mass/code_time/code_length**2", ) self.add_field( - ("gas", "%s_%s" % (vel_prefix, comp)), + ("gas", f"{vel_prefix}_{comp}"), sampling_type="cell", function=velocity_field(i + 1), units=unit_system["velocity"], diff --git a/yt/frontends/athena_pp/io.py b/yt/frontends/athena_pp/io.py index 8da70f46ff2..813d014ebee 100644 --- a/yt/frontends/athena_pp/io.py +++ b/yt/frontends/athena_pp/io.py @@ -51,7 +51,7 @@ def _read_fluid_selection(self, chunks, selector, fields, size): for field in fields: ftype, fname = field dname, fdi = self.ds._field_map[fname] - ds = f["/%s" % dname] + ds = f[f"/{dname}"] ind = 0 for chunk in chunks: if self.ds.logarithmic: @@ -86,7 +86,7 @@ def _read_chunk_data(self, chunk, fields): for field in fields: ftype, fname = field dname, fdi = self.ds._field_map[fname] - ds = f["/%s" % dname] + ds = f[f"/{dname}"] for gs in grid_sequences(chunk.objs): start = gs[0].id - gs[0]._id_offset end = gs[-1].id - gs[-1]._id_offset + 1 diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index b4ceeacfd55..1e8bafd3cb8 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -477,9 +477,9 @@ def _cache_endianness(self, test_grid): # * FLOAT data # * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27 if bpr == endian[0]: - dtype = "f{bpr}" else: raise ValueError( "FAB header is neither big nor little endian. Perhaps the file is corrupt?" 
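The _cache_endianness hunk above picks a numpy dtype string from the FAB header's bytes-per-real value. A minimal sketch of the f-string form of that selection, assuming numpy's "<" (little-endian) and ">" (big-endian) byte-order prefixes and hypothetical values for bpr and endian:

>>> import numpy as np
>>> bpr, endian = 8, (8, 4)   # hypothetical: bytes per real, candidate endianness markers
>>> dtype = f"<f{bpr}" if bpr == endian[0] else f">f{bpr}"
>>> np.dtype(dtype)
dtype('float64')

The %-style spelling it presumably replaces ("<f%s" % bpr) yields the same string; only the formatting syntax changes.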
diff --git a/yt/frontends/boxlib/fields.py b/yt/frontends/boxlib/fields.py index e4ecc22b24e..4feda61d89c 100644 --- a/yt/frontends/boxlib/fields.py +++ b/yt/frontends/boxlib/fields.py @@ -224,13 +224,13 @@ class BoxlibFieldInfo(FieldInfoContainer): def setup_particle_fields(self, ptype): def _get_vel(axis): def velocity(field, data): - return data["particle_momentum_%s" % axis] / data["particle_mass"] + return data[f"particle_momentum_{axis}"] / data["particle_mass"] return velocity for ax in "xyz": self.add_field( - (ptype, "particle_velocity_%s" % ax), + (ptype, f"particle_velocity_{ax}"), sampling_type="particle", function=_get_vel(ax), units="code_length/code_time", @@ -268,13 +268,13 @@ def setup_fluid_fields(self): def setup_momentum_to_velocity(self): def _get_vel(axis): def velocity(field, data): - return data["%smom" % axis] / data["density"] + return data[f"{axis}mom"] / data["density"] return velocity for ax in "xyz": self.add_field( - ("gas", "velocity_%s" % ax), + ("gas", f"velocity_{ax}"), sampling_type="cell", function=_get_vel(ax), units=self.ds.unit_system["velocity"], @@ -283,13 +283,13 @@ def velocity(field, data): def setup_velocity_to_momentum(self): def _get_mom(axis): def momentum(field, data): - return data["%svel" % axis] * data["density"] + return data[f"{axis}vel"] * data["density"] return momentum for ax in "xyz": self.add_field( - ("gas", "momentum_%s" % ax), + ("gas", f"momentum_{ax}"), sampling_type="cell", function=_get_mom(ax), units=mom_units, @@ -352,11 +352,11 @@ def setup_fluid_fields(self): # We have a fraction nice_name, tex_label = _nice_species_name(field) self.alias( - ("gas", "%s_fraction" % nice_name), ("boxlib", field), units="" + ("gas", f"{nice_name}_fraction"), ("boxlib", field), units="" ) - func = _create_density_func(("gas", "%s_fraction" % nice_name)) + func = _create_density_func(("gas", f"{nice_name}_fraction")) self.add_field( - name=("gas", "%s_density" % nice_name), + name=("gas", f"{nice_name}_density"), sampling_type="cell", function=func, units=self.ds.unit_system["density"], @@ -465,11 +465,11 @@ def setup_fluid_fields(self): display_name=tex_label, ) self.alias( - ("gas", "%s_fraction" % nice_name), ("boxlib", field), units="" + ("gas", f"{nice_name}_fraction"), ("boxlib", field), units="" ) - func = _create_density_func(("gas", "%s_fraction" % nice_name)) + func = _create_density_func(("gas", f"{nice_name}_fraction")) self.add_field( - name=("gas", "%s_density" % nice_name), + name=("gas", f"{nice_name}_density"), sampling_type="cell", function=func, units=unit_system["density"], @@ -501,7 +501,7 @@ def setup_fluid_fields(self): display_name=display_name, ) self.alias( - ("gas", "%s_creation_rate" % nice_name), + ("gas", f"{nice_name}_creation_rate"), ("boxlib", field), units=unit_system["frequency"], ) diff --git a/yt/frontends/chombo/data_structures.py b/yt/frontends/chombo/data_structures.py index 7a9014c09f5..4e591b94d2f 100644 --- a/yt/frontends/chombo/data_structures.py +++ b/yt/frontends/chombo/data_structures.py @@ -176,8 +176,8 @@ def _parse_index(self): self.dds_list[lev_index][2] = 1.0 for level_id, box in enumerate(boxes): - si = np.array([box["lo_%s" % ax] for ax in "ijk"[:D]]) - ei = np.array([box["hi_%s" % ax] for ax in "ijk"[:D]]) + si = np.array([box[f"lo_{ax}"] for ax in "ijk"[:D]]) + ei = np.array([box[f"hi_{ax}"] for ax in "ijk"[:D]]) if D == 1: si = np.concatenate((si, [0.0, 0.0])) @@ -430,8 +430,8 @@ def _parse_index(self): self.dds_list[lev_index][2] = 1.0 for level_id, box in enumerate(boxes): - si = 
np.array([box["lo_%s" % ax] for ax in "ijk"[:D]]) - ei = np.array([box["hi_%s" % ax] for ax in "ijk"[:D]]) + si = np.array([box[f"lo_{ax}"] for ax in "ijk"[:D]]) + ei = np.array([box[f"hi_{ax}"] for ax in "ijk"[:D]]) if D == 1: si = np.concatenate((si, [0.0, 0.0])) diff --git a/yt/frontends/chombo/fields.py b/yt/frontends/chombo/fields.py index 58d36d98a6f..ac94a9fd579 100644 --- a/yt/frontends/chombo/fields.py +++ b/yt/frontends/chombo/fields.py @@ -67,13 +67,13 @@ class Orion2FieldInfo(ChomboFieldInfo): def setup_particle_fields(self, ptype): def _get_vel(axis): def velocity(field, data): - return data["particle_momentum_%s" % axis] / data["particle_mass"] + return data[f"particle_momentum_{axis}"] / data["particle_mass"] return velocity for ax in "xyz": self.add_field( - (ptype, "particle_velocity_%s" % ax), + (ptype, f"particle_velocity_{ax}"), sampling_type="particle", function=_get_vel(ax), units="code_length/code_time", @@ -127,13 +127,13 @@ def _temperature(field, data): def _get_vel(axis): def velocity(field, data): - return data["momentum_%s" % axis] / data["density"] + return data[f"momentum_{axis}"] / data["density"] return velocity for ax in "xyz": self.add_field( - ("gas", "velocity_%s" % ax), + ("gas", f"velocity_{ax}"), sampling_type="cell", function=_get_vel(ax), units=unit_system["velocity"], @@ -182,7 +182,7 @@ def velocity(field, data): ) setup_magnetic_field_aliases( - self, "chombo", ["%s-magnfield" % ax for ax in "XYZ"] + self, "chombo", [f"{ax}-magnfield" for ax in "XYZ"] ) @@ -234,8 +234,8 @@ def setup_particle_fields(self, ptype, ftype="gas", num_neighbors=64): for alias in aliases: self.alias((ptype, alias), (ptype, f), units=output_units) - ppos_fields = ["particle_position_%s" % ax for ax in "xyz"] - pvel_fields = ["particle_velocity_%s" % ax for ax in "xyz"] + ppos_fields = [f"particle_position_{ax}" for ax in "xyz"] + pvel_fields = [f"particle_velocity_{ax}" for ax in "xyz"] particle_vector_functions(ptype, ppos_fields, pvel_fields, self) particle_deposition_functions(ptype, "particle_position", "particle_mass", self) @@ -394,4 +394,4 @@ class PlutoFieldInfo(ChomboFieldInfo): def setup_fluid_fields(self): from yt.fields.magnetic_field import setup_magnetic_field_aliases - setup_magnetic_field_aliases(self, "chombo", ["bx%s" % ax for ax in [1, 2, 3]]) + setup_magnetic_field_aliases(self, "chombo", [f"bx{ax}" for ax in [1, 2, 3]]) diff --git a/yt/frontends/chombo/io.py b/yt/frontends/chombo/io.py index 3b2bbce2982..cf1fd0ba918 100644 --- a/yt/frontends/chombo/io.py +++ b/yt/frontends/chombo/io.py @@ -166,7 +166,7 @@ def _read_particle_selection(self, chunks, selector, fields): def _read_particles(self, grid, name): field_index = self.particle_field_index[name] - lev = "level_%s" % grid.Level + lev = f"level_{grid.Level}" particles_per_grid = self._handle[lev]["particles:offsets"][()] items_per_particle = len(self._particle_field_index) diff --git a/yt/frontends/enzo/answer_testing_support.py b/yt/frontends/enzo/answer_testing_support.py index f9badf02841..2f2552a0c35 100644 --- a/yt/frontends/enzo/answer_testing_support.py +++ b/yt/frontends/enzo/answer_testing_support.py @@ -100,7 +100,7 @@ def __call__(self): for xmin, xmax in zip(self.left_edges, self.right_edges): mask = (position >= xmin) * (position <= xmax) exact_field = np.interp(position[mask], exact["pos"], exact[k]) - myname = "ShockTubeTest_%s" % k + myname = f"ShockTubeTest_{k}" # yield test vs analytical solution yield AssertWrapper( myname, diff --git a/yt/frontends/enzo/data_structures.py 
b/yt/frontends/enzo/data_structures.py index c5ba83655b8..d1dfdf29b2d 100644 --- a/yt/frontends/enzo/data_structures.py +++ b/yt/frontends/enzo/data_structures.py @@ -155,7 +155,7 @@ def __init__(self, ds, dataset_type): self._bn = ds.file_style else: self._bn = "%s.cpu%%04i" - self.index_filename = os.path.abspath("%s.hierarchy" % (ds.parameter_filename)) + self.index_filename = os.path.abspath(f"{ds.parameter_filename}.hierarchy") if os.path.getsize(self.index_filename) == 0: raise IOError(-1, "File empty", self.index_filename) self.directory = os.path.dirname(self.index_filename) @@ -972,9 +972,9 @@ def _set_code_unit_attributes(self): @classmethod def _is_valid(cls, *args, **kwargs): - if ("%s" % (args[0])).endswith(".hierarchy"): + if (f"{args[0]}").endswith(".hierarchy"): return True - return os.path.exists("%s.hierarchy" % args[0]) + return os.path.exists(f"{args[0]}.hierarchy") @classmethod def _guess_candidates(cls, base, directories, files): diff --git a/yt/frontends/enzo/fields.py b/yt/frontends/enzo/fields.py index e2fd0ac31c5..a225b4bf4a7 100644 --- a/yt/frontends/enzo/fields.py +++ b/yt/frontends/enzo/fields.py @@ -139,7 +139,7 @@ def add_species_field(self, species): # items... # self.add_output_field( - ("enzo", "%s_Density" % species), + ("enzo", f"{species}_Density"), sampling_type="cell", take_log=True, units="code_mass/code_length**3", @@ -147,9 +147,7 @@ def add_species_field(self, species): yt_name = known_species_names[species] # don't alias electron density since mass is wrong if species != "Electron": - self.alias( - ("gas", "%s_density" % yt_name), ("enzo", "%s_Density" % species) - ) + self.alias(("gas", f"{yt_name}_density"), ("enzo", f"{species}_Density")) def setup_species_fields(self): species_names = [ @@ -185,7 +183,7 @@ def setup_fluid_fields(self): if multi_species > 0 or dengo == 1: self.setup_species_fields() self.setup_energy_field() - setup_magnetic_field_aliases(self, "enzo", ["B%s" % ax for ax in "xyz"]) + setup_magnetic_field_aliases(self, "enzo", [f"B{ax}" for ax in "xyz"]) def setup_energy_field(self): unit_system = self.ds.unit_system diff --git a/yt/frontends/enzo/io.py b/yt/frontends/enzo/io.py index 2fdb62bbded..e74d86c2c1a 100644 --- a/yt/frontends/enzo/io.py +++ b/yt/frontends/enzo/io.py @@ -79,7 +79,7 @@ def _read_particle_fields(self, chunks, ptf, selector): if ptype != "io": if g.NumberOfActiveParticles[ptype] == 0: continue - pds = ds.get("Particles/%s" % ptype) + pds = ds.get(f"Particles/{ptype}") else: pds = ds pn = _particle_position_names.get(ptype, r"particle_position_%s") diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 49406f99646..1f6db1d6b5c 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -82,7 +82,7 @@ def _set_units(self): ) # Comoving lengths for my_unit in ["m", "pc", "AU"]: - new_unit = "%scm" % my_unit + new_unit = f"{my_unit}cm" # technically not true, but should be ok self.unit_registry.add( new_unit, @@ -602,7 +602,7 @@ def _find_outputs(self): # look for time outputs. potential_time_outputs = glob.glob( os.path.join( - self.parameters["GlobalDir"], "%s*" % self.parameters["DataDumpDir"] + self.parameters["GlobalDir"], f"{self.parameters['DataDumpDir']}*" ) ) self.all_time_outputs = self._check_for_outputs(potential_time_outputs) @@ -611,7 +611,7 @@ def _find_outputs(self): # look for redshift outputs. 
potential_redshift_outputs = glob.glob( os.path.join( - self.parameters["GlobalDir"], "%s*" % self.parameters["RedshiftDumpDir"] + self.parameters["GlobalDir"], f"{self.parameters['RedshiftDumpDir']}*" ) ) self.all_redshift_outputs = self._check_for_outputs(potential_redshift_outputs) @@ -653,8 +653,8 @@ def _check_for_outputs(self, potential_outputs): index = output[output.find(dir_key) + len(dir_key) :] filename = os.path.join( self.parameters["GlobalDir"], - "%s%s" % (dir_key, index), - "%s%s" % (output_key, index), + f"{dir_key}{index}", + f"{output_key}{index}", ) try: ds = load(filename) diff --git a/yt/frontends/enzo/tests/test_outputs.py b/yt/frontends/enzo/tests/test_outputs.py index 79afe8c791a..e5e6dd0833d 100644 --- a/yt/frontends/enzo/tests/test_outputs.py +++ b/yt/frontends/enzo/tests/test_outputs.py @@ -40,7 +40,7 @@ def color_conservation(ds): for s in sorted(species_names): if s == "El": continue - dens_yt -= dd["%s_density" % s] + dens_yt -= dd[f"{s}_density"] dens_yt -= dd["metal_density"] delta_yt = np.abs(dens_yt / dd["density"]) # Now we compare color conservation to Enzo's color conservation @@ -72,7 +72,7 @@ def check_color_conservation(ds): for s in sorted(species_names): if s == "El": continue - dens_yt -= dd["%s_density" % s] + dens_yt -= dd[f"{s}_density"] dens_yt -= dd["metal_density"] delta_yt = np.abs(dens_yt / dd["density"]) @@ -190,8 +190,8 @@ def test_active_particle_datasets(): "metallicity", "particle_mass", ] - pfields += ["particle_position_%s" % d for d in "xyz"] - pfields += ["particle_velocity_%s" % d for d in "xyz"] + pfields += [f"particle_position_{d}" for d in "xyz"] + pfields += [f"particle_velocity_{d}" for d in "xyz"] acc_part_fields = [("AccretingParticle", pf) for pf in ["AccretionRate"] + pfields] diff --git a/yt/frontends/enzo_p/data_structures.py b/yt/frontends/enzo_p/data_structures.py index 3106b201870..0d3c2081e09 100644 --- a/yt/frontends/enzo_p/data_structures.py +++ b/yt/frontends/enzo_p/data_structures.py @@ -377,7 +377,7 @@ def _parse_parameter_file(self): for attr in co_pars ) for attr in ["hubble_constant", "omega_matter", "omega_lambda"]: - setattr(self, attr, co_dict["%s_now" % attr]) + setattr(self, attr, co_dict[f"{attr}_now"]) # Current redshift is not stored, so it's not possible # to set all cosmological units yet. 
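Several of the surrounding hunks move dictionary lookups inside the replacement field, e.g. f"{self.parameters['DataDumpDir']}*" in the Enzo simulation handler. A short illustration of the equivalence, and of the quoting rule it relies on (before Python 3.12 the inner quotes must differ from the outer ones); the dictionary here is hypothetical:

>>> parameters = {"DataDumpDir": "DD"}
>>> f"{parameters['DataDumpDir']}*"      # inner quotes differ from the outer ones
'DD*'
>>> "%s*" % parameters["DataDumpDir"]    # the %-style form being replaced
'DD*'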
@@ -444,7 +444,7 @@ def _set_code_unit_attributes(self): p = self.parameters for d, u in zip(("length", "time"), ("cm", "s")): val = nested_dict_get(p, ("Units", d), default=1) - setdefaultattr(self, "%s_unit" % d, self.quan(val, u)) + setdefaultattr(self, f"{d}_unit", self.quan(val, u)) mass = nested_dict_get(p, ("Units", "mass")) if mass is None: density = nested_dict_get(p, ("Units", "density")) diff --git a/yt/frontends/enzo_p/io.py b/yt/frontends/enzo_p/io.py index 60bdbc05239..de985408d6c 100644 --- a/yt/frontends/enzo_p/io.py +++ b/yt/frontends/enzo_p/io.py @@ -154,7 +154,7 @@ def _read_obj_field(self, obj, field, fid_data): else: close = False ftype, fname = field - node = "/%s/field%s%s" % (obj.block_name, self._sep, fname) + node = f"/{obj.block_name}/field{self._sep}{fname}" dg = h5py.h5d.open(fid, node.encode("latin-1")) rdata = np.empty( self.ds.grid_dimensions[: self.ds.dimensionality][::-1], diff --git a/yt/frontends/enzo_p/tests/test_misc.py b/yt/frontends/enzo_p/tests/test_misc.py index d8b236ae7d5..db00335b57e 100644 --- a/yt/frontends/enzo_p/tests/test_misc.py +++ b/yt/frontends/enzo_p/tests/test_misc.py @@ -27,7 +27,7 @@ def get_random_block_string(max_n=64, random_state=None, level=None): if level is None: level = random_state.randint(0, high=max_l) if level > 0: - my_block = "%s:%s" % (num2[:-level], num2[-level:]) + my_block = f"{num2[:-level]}:{num2[-level:]}" else: my_block = num2 my_block = "B" + my_block @@ -45,7 +45,7 @@ def flip_random_block_bit(block, rs): # choose which descriptor to modify flippable = [i for i, descr in enumerate(descriptors) if len(descr) > 0] if len(flippable) == 0: # when block in ['B', 'B_', 'B__'] - raise ValueError("%s has no bits that can be flipped" % block) + raise ValueError(f"{block} has no bits that can be flipped") descr_index = flippable[rs.randint(0, len(flippable))] # split block descriptor into left and right parts @@ -93,7 +93,7 @@ def test_root_blocks(): max_n = 2 ** i n1, l1, b1 = get_random_block_string(max_n=max_n, random_state=rs, level=0) n2, l2, b2 = get_random_block_string(max_n=32, random_state=rs, level=0) - block = "%s:%s" % (b1, b2[1:]) + block = f"{b1}:{b2[1:]}" nrb = get_root_blocks(block, min_dim=1) assert nrb == max_n @@ -113,7 +113,7 @@ def test_is_parent(): max_n=max_n, random_state=rs, level=0 ) n2, l2, b2 = get_random_block_string(max_n=32, random_state=rs, level=0) - descriptors.append("%s:%s" % (b1[1:], b2[1:])) + descriptors.append(f"{b1[1:]}:{b2[1:]}") block = "B" + "_".join(descriptors) # since b2 is computed with max_n=32 in the for-loop, block always # has a refined great-great-grandparent diff --git a/yt/frontends/exodus_ii/data_structures.py b/yt/frontends/exodus_ii/data_structures.py index 3587e2888f6..039ba1795b8 100644 --- a/yt/frontends/exodus_ii/data_structures.py +++ b/yt/frontends/exodus_ii/data_structures.py @@ -295,7 +295,7 @@ def _read_coordinates(self): with self._handle.open_ds() as ds: if "coord" not in ds.variables: coords = ( - np.array([ds.variables["coord%s" % ax][:] for ax in coord_axes]) + np.array([ds.variables[f"coord{ax}"][:] for ax in coord_axes]) .transpose() .astype("f8") ) @@ -321,8 +321,8 @@ def _apply_displacement(self, coords, mesh_id): coord_axes = "xyz"[: self.dimensionality] with self._handle.open_ds() as ds: for i, ax in enumerate(coord_axes): - if "disp_%s" % ax in self.parameters["nod_names"]: - ind = self.parameters["nod_names"].index("disp_%s" % ax) + if f"disp_{ax}" in self.parameters["nod_names"]: + ind = self.parameters["nod_names"].index(f"disp_{ax}") 
disp = ds.variables["vals_nod_var%d" % (ind + 1)][self.step] new_coords[:, i] = coords[:, i] + fac * disp + offset[i] diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index a029e987c66..f36bb97a5dc 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -30,7 +30,7 @@ class ExodusIISimulation(DatasetSeries, metaclass=RegisteredSimulationTimeSeries def __init__(self, simulation_directory, find_outputs=False): self.simulation_directory = simulation_directory - fn_pattern = "%s/*" % self.simulation_directory + fn_pattern = f"{self.simulation_directory}/*" potential_outputs = glob.glob(fn_pattern) self.all_outputs = self._check_for_outputs(potential_outputs) self.all_outputs.sort(key=lambda obj: obj["filename"]) diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index 72b6f039590..d95ae89b909 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -352,7 +352,7 @@ def __init__( if isinstance( self.filenames[0], _astropy.pyfits.hdu.image._ImageBaseHDU ) or isinstance(self.filenames[0], _astropy.pyfits.HDUList): - fn = "InMemoryFITSFile_%s" % uuid.uuid4().hex + fn = f"InMemoryFITSFile_{uuid.uuid4().hex}" else: fn = self.filenames[0] self._handle._fits_files.append(self._handle) @@ -417,7 +417,7 @@ def _set_code_unit_attributes(self): if getattr(self, unit + "_unit", None) is not None: continue mylog.warning("Assuming 1.0 = 1.0 %s", cgs) - setdefaultattr(self, "%s_unit" % unit, self.quan(1.0, cgs)) + setdefaultattr(self, f"{unit}_unit", self.quan(1.0, cgs)) self.magnetic_unit = np.sqrt( 4 * np.pi * self.mass_unit / (self.time_unit ** 2 * self.length_unit) ) @@ -584,7 +584,7 @@ def _set_code_unit_attributes(self): if unit == "magnetic": short_unit = "bfunit" else: - short_unit = "%sunit" % unit[0] + short_unit = f"{unit[0]}unit" if short_unit in self.primary_header: # units should now be in header u = self.quan( @@ -607,7 +607,7 @@ def _set_code_unit_attributes(self): unit, cgs, ) - setdefaultattr(self, "%s_unit" % unit, u) + setdefaultattr(self, f"{unit}_unit", u) def _determine_bbox(self): dx = np.zeros(3) @@ -690,7 +690,7 @@ def _set_code_unit_attributes(self): if units == "rad": units = "radian" pixel_area = np.prod(np.abs(self.wcs_2d.wcs.cdelt)) - pixel_area = self.quan(pixel_area, "%s**2" % (units)).in_cgs() + pixel_area = self.quan(pixel_area, f"{units}**2").in_cgs() pixel_dims = pixel_area.units.dimensions self.unit_registry.add("pixel", float(pixel_area.value), dimensions=pixel_dims) if "beam_size" in self.specified_parameters: diff --git a/yt/frontends/fits/misc.py b/yt/frontends/fits/misc.py index 19f70a69a67..26d067ec358 100644 --- a/yt/frontends/fits/misc.py +++ b/yt/frontends/fits/misc.py @@ -53,7 +53,7 @@ def setup_counts_fields(ds, ebounds, ftype="gas"): """ for (emin, emax) in ebounds: cfunc = _make_counts(emin, emax) - fname = "counts_%s-%s" % (emin, emax) + fname = f"counts_{emin}-{emax}" mylog.info("Creating counts field %s.", fname) ds.add_field( (ftype, fname), @@ -61,7 +61,7 @@ def setup_counts_fields(ds, ebounds, ftype="gas"): function=cfunc, units="counts/pixel", validators=[ValidateSpatial()], - display_name="Counts (%s-%s keV)" % (emin, emax), + display_name=f"Counts ({emin}-{emax} keV)", ) @@ -189,7 +189,7 @@ def _reg_field(field, data): if field_parameters is not None: for k, v in field_parameters.items(): obj.set_field_parameter(k, v) - return obj.cut_region(["obj['%s'] > 0" % 
(reg_name)]) + return obj.cut_region([f"obj['{reg_name}'] > 0"]) class PlotWindowWCS: @@ -241,8 +241,8 @@ def __init__(self, pw): wcs = pw.ds.wcs_2d.wcs xax = pw.ds.coordinates.x_axis[pw.data_source.axis] yax = pw.ds.coordinates.y_axis[pw.data_source.axis] - xlabel = "%s (%s)" % (wcs.ctype[xax].split("-")[0], wcs.cunit[xax]) - ylabel = "%s (%s)" % (wcs.ctype[yax].split("-")[0], wcs.cunit[yax]) + xlabel = f"{wcs.ctype[xax].split('-')[0]} ({wcs.cunit[xax]})" + ylabel = f"{wcs.ctype[yax].split('-')[0]} ({wcs.cunit[yax]})" fp = pw._font_properties wcs_ax.coords[0].set_axislabel(xlabel, fontproperties=fp, minpad=0.5) wcs_ax.coords[1].set_axislabel(ylabel, fontproperties=fp, minpad=0.4) diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 2af37394ebe..3732b4fed00 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -94,7 +94,7 @@ def _parse_index(self): nzb = ds.parameters["nzb"] except KeyError: nxb, nyb, nzb = [ - int(f["/simulation parameters"]["n%sb" % ax]) for ax in "xyz" + int(f["/simulation parameters"][f"n{ax}b"]) for ax in "xyz" ] self.grid_dimensions[:] *= (nxb, nyb, nzb) try: @@ -298,7 +298,7 @@ def _parse_parameter_file(self): # overwrite scalars with the same name. for ptype in ["scalars", "runtime parameters"]: for vtype in ["integer", "real", "logical", "string"]: - hns.append("%s %s" % (vtype, ptype)) + hns.append(f"{vtype} {ptype}") if self._flash_version > 7: for hn in hns: if hn not in self._handle: @@ -358,7 +358,7 @@ def _parse_parameter_file(self): nzb = self.parameters["nzb"] except KeyError: nxb, nyb, nzb = [ - int(self._handle["/simulation parameters"]["n%sb" % ax]) for ax in "xyz" + int(self._handle["/simulation parameters"][f"n{ax}b"]) for ax in "xyz" ] # FLASH2 only! 
# Determine dimensionality @@ -393,12 +393,8 @@ def _parse_parameter_file(self): nblocky = 1 # Determine domain boundaries - dle = np.array([self.parameters["%smin" % ax] for ax in "xyz"]).astype( - "float64" - ) - dre = np.array([self.parameters["%smax" % ax] for ax in "xyz"]).astype( - "float64" - ) + dle = np.array([self.parameters[f"{ax}min"] for ax in "xyz"]).astype("float64") + dre = np.array([self.parameters[f"{ax}max"] for ax in "xyz"]).astype("float64") if self.dimensionality < 3: for d in [dimensionality] + list(range(3 - dimensionality)): if dle[d] == dre[d]: @@ -436,7 +432,7 @@ def _parse_parameter_file(self): # Determine if this is a periodic box p = [ - self.parameters.get("%sl_boundary_type" % ax, None) == "periodic" + self.parameters.get(f"{ax}l_boundary_type", None) == "periodic" for ax in "xyz" ] self.periodicity = tuple(p) diff --git a/yt/frontends/flash/fields.py b/yt/frontends/flash/fields.py index d9fd0d291d8..ca7c7d335f9 100644 --- a/yt/frontends/flash/fields.py +++ b/yt/frontends/flash/fields.py @@ -90,10 +90,10 @@ def setup_fluid_fields(self): Na = self.ds.quan(6.022140857e23, "g**-1") for i in range(1, 1000): self.add_output_field( - ("flash", "r{0:03}".format(i)), + ("flash", f"r{i:03}"), sampling_type="cell", units="", - display_name="Energy Group {0}".format(i), + display_name=f"Energy Group {i}", ) # Add energy fields def ekin(data): @@ -225,4 +225,4 @@ def _number_density(field, data): units=unit_system["number_density"], ) - setup_magnetic_field_aliases(self, "flash", ["mag%s" % ax for ax in "xyz"]) + setup_magnetic_field_aliases(self, "flash", [f"mag{ax}" for ax in "xyz"]) diff --git a/yt/frontends/flash/io.py b/yt/frontends/flash/io.py index 63d24fd0d2f..50932cc4a51 100644 --- a/yt/frontends/flash/io.py +++ b/yt/frontends/flash/io.py @@ -59,7 +59,7 @@ def io_iter(self, chunks, fields): # outside; here, though, we're iterating over them on the # inside because we may exhaust our chunks. 
ftype, fname = field - ds = f["/%s" % fname] + ds = f[f"/{fname}"] for gs in grid_sequences(chunk.objs): start = gs[0].id - gs[0]._id_offset end = gs[-1].id - gs[-1]._id_offset + 1 @@ -71,7 +71,7 @@ def _read_particle_coords(self, chunks, ptf): chunks = list(chunks) f_part = self._particle_handle p_ind = self.ds.index._particle_indices - px, py, pz = (self._particle_fields["particle_pos%s" % ax] for ax in "xyz") + px, py, pz = (self._particle_fields[f"particle_pos{ax}"] for ax in "xyz") p_fields = f_part["/tracer particles"] assert len(ptf) == 1 ptype = list(ptf.keys())[0] @@ -89,7 +89,7 @@ def _read_particle_fields(self, chunks, ptf, selector): chunks = list(chunks) f_part = self._particle_handle p_ind = self.ds.index._particle_indices - px, py, pz = (self._particle_fields["particle_pos%s" % ax] for ax in "xyz") + px, py, pz = (self._particle_fields[f"particle_pos{ax}"] for ax in "xyz") p_fields = f_part["/tracer particles"] assert len(ptf) == 1 ptype = list(ptf.keys())[0] @@ -116,7 +116,7 @@ def _read_obj_field(self, obj, field, ds_offset=None): # our context here includes datasets and whatnot that are opened in the # hdf5 file if ds is None: - ds = self._handle["/%s" % field[1]] + ds = self._handle[f"/{field[1]}"] if offset == -1: data = ds[obj.id - obj._id_offset, :, :, :].transpose() else: @@ -142,7 +142,7 @@ def _read_chunk_data(self, chunk, fields): return rv for field in fluid_fields: ftype, fname = field - ds = f["/%s" % fname] + ds = f[f"/{fname}"] for gs in grid_sequences(chunk.objs): start = gs[0].id - gs[0]._id_offset end = gs[-1].id - gs[-1]._id_offset + 1 @@ -162,7 +162,7 @@ def __init__(self, ds): self._handle = ds._handle self._particle_fields = determine_particle_fields(self._handle) self._position_fields = [ - self._particle_fields["particle_pos%s" % ax] for ax in "xyz" + self._particle_fields[f"particle_pos{ax}"] for ax in "xyz" ] self._chunksize = 32 ** 3 diff --git a/yt/frontends/gadget/data_structures.py b/yt/frontends/gadget/data_structures.py index c5fa67e503d..862aa86e29a 100644 --- a/yt/frontends/gadget/data_structures.py +++ b/yt/frontends/gadget/data_structures.py @@ -154,7 +154,7 @@ def open(self): for filename in [self.filename, self.filename + ".0"]: if os.path.exists(filename): return open(filename, "rb") - raise RuntimeError("Snapshot file %s does not exist." 
% self.filename) + raise RuntimeError(f"Snapshot file {self.filename} does not exist.") def validate(self): """Validate data integrity.""" @@ -422,7 +422,7 @@ def _parse_parameter_file(self): ) if hvals["NumFiles"] > 1: - self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix) + self.filename_template = f"{prefix}.%(num)s{self._suffix}" else: self.filename_template = self.parameter_filename @@ -636,7 +636,7 @@ def _set_owls_eagle(self): suffix = self.parameter_filename.rsplit(".", 1)[-1] if self.parameters["NumFiles"] > 1: - self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix) + self.filename_template = f"{prefix}.%(num)i.{suffix}" else: self.filename_template = self.parameter_filename diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index c0168d38a9d..2808cfec7bd 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -56,7 +56,7 @@ def _read_particle_coords(self, chunks, ptf): for ptype in sorted(ptf): if data_file.total_particles[ptype] == 0: continue - c = f["/%s/Coordinates" % ptype][si:ei, :].astype("float64") + c = f[f"/{ptype}/Coordinates"][si:ei, :].astype("float64") x, y, z = (np.squeeze(_) for _ in np.split(c, 3, axis=1)) if ptype == self.ds._sph_ptypes[0]: pdtype = c.dtype @@ -168,7 +168,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for ptype, field_list in sorted(ptf.items()): if data_file.total_particles[ptype] == 0: continue - g = f["/%s" % ptype] + g = f[f"/{ptype}"] if getattr(selector, "is_all_data", False): mask = slice(None, None, None) mask_sum = data_file.total_particles[ptype] @@ -232,7 +232,7 @@ def _count_particles(self, data_file): f.close() if None not in (si, ei): np.clip(pcount - si, 0, ei - si, out=pcount) - npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) + npart = dict((f"PartType{i}", v) for i, v in enumerate(pcount)) return npart def _identify_fields(self, data_file): @@ -507,7 +507,7 @@ def _calculate_field_offsets( elif self._format == 1: pos += 4 else: - raise RuntimeError("incorrect Gadget format %s!" 
% str(self._format)) + raise RuntimeError(f"incorrect Gadget format {str(self._format)}!") any_ptypes = False for ptype in self._ptypes: if field == "Mass" and ptype not in self.var_mass: diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 37e80e34a7d..69f2f5cbe54 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -75,7 +75,7 @@ def _set_units(self): ) # Comoving lengths for my_unit in ["m", "pc", "AU"]: - new_unit = "%scm" % my_unit + new_unit = f"{my_unit}cm" # technically not true, but should be ok self.unit_registry.add( new_unit, @@ -405,7 +405,7 @@ def _snapshot_format(self, index=None): count = "*" else: count = "%03d" % index - filename = "%s_%s%s" % (self.parameters["SnapshotFileBase"], count, suffix) + filename = f"{self.parameters['SnapshotFileBase']}_{count}{suffix}" return os.path.join(self.data_dir, filename) def _get_all_outputs(self, find_outputs=False): @@ -545,5 +545,5 @@ def _write_cosmology_outputs(self, filename, outputs, start_index, decimals=3): mylog.info("Writing redshift output list to %s.", filename) f = open(filename, "w") for output in outputs: - f.write("%f\n" % (1.0 / (1.0 + output["redshift"]))) + f.write(f"{1.0 / (1.0 + output['redshift']):f}\n") f.close() diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 1874a61d55e..bc17b83ed6e 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -141,7 +141,7 @@ def _read_particle_positions(self, ptype, f=None): else: close = False - pos = f[ptype]["%sPos" % ptype][()].astype("float64") + pos = f[ptype][f"{ptype}Pos"][()].astype("float64") if close: f.close() @@ -240,7 +240,7 @@ def _parse_parameter_file(self): ) ) suffix = self.parameter_filename.rsplit(".", 1)[-1] - self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix) + self.filename_template = f"{prefix}.%(num)i.{suffix}" self.file_count = self.parameters["NumFiles"] self.particle_types = ("Group", "Subhalo") self.particle_types_raw = ("Group", "Subhalo") @@ -493,11 +493,11 @@ def set_code_units(self): def _set_code_unit_attributes(self): for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: - my_unit = "%s_unit" % unit + my_unit = f"{unit}_unit" setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) def __repr__(self): - return "%s" % self.real_ds + return f"{self.real_ds}" def _setup_classes(self): self.objects = [] @@ -584,8 +584,7 @@ class GagdetFOFHaloContainer(YTSelectionContainer): def __init__(self, ptype, particle_identifier, ds=None): if ptype not in ds.particle_types_raw: raise RuntimeError( - 'Possible halo types are %s, supplied "%s".' - % (ds.particle_types_raw, ptype) + f'Possible halo types are {ds.particle_types_raw}, supplied "{ptype}".' 
) self.ptype = ptype @@ -620,13 +619,13 @@ def __init__(self, ptype, particle_identifier, ds=None): ptype, self.particle_identifier ) - halo_fields = ["%sLen" % ptype] + halo_fields = [f"{ptype}Len"] if ptype == "Subhalo": halo_fields.append("SubhaloGrNr") my_data = self.index._get_halo_values( ptype, np.array([self.particle_identifier]), halo_fields ) - self.particle_number = np.int64(my_data["%sLen" % ptype][0]) + self.particle_number = np.int64(my_data[f"{ptype}Len"][0]) if ptype == "Group": self.group_identifier = self.particle_identifier @@ -713,7 +712,7 @@ def __init__(self, ptype, particle_identifier, ds=None): self.field_data_end = self.field_data_end.astype(np.int64) for attr in ["mass", "position", "velocity"]: - setattr(self, attr, self[self.ptype, "particle_%s" % attr][0]) + setattr(self, attr, self[self.ptype, f"particle_{attr}"][0]) def __repr__(self): return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier) diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index 5bf4bd22917..290cbef449f 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -44,7 +44,7 @@ def _yield_coordinates(self, data_file): pcount = data_file.total_particles[ptype] if pcount == 0: continue - coords = f[ptype]["%sPos" % ptype][()].astype("float64") + coords = f[ptype][f"{ptype}Pos"][()].astype("float64") coords = np.resize(coords, (pcount, 3)) yield ptype, coords diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index e45aa626315..9f6ad62032e 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -271,7 +271,7 @@ def _set_code_unit_attributes(self): ("mass", 1.0, "g"), ("magnetic", np.sqrt(4.0 * np.pi), "gauss"), ]: - setdefaultattr(self, "%s_unit" % unit, self.quan(value, cgs)) + setdefaultattr(self, f"{unit}_unit", self.quan(value, cgs)) if len(self.units_override) == 0: mylog.warning("Assuming %8s unit = %f %s", unit, value, cgs) diff --git a/yt/frontends/gamer/fields.py b/yt/frontends/gamer/fields.py index 812fe037b88..ae762426123 100644 --- a/yt/frontends/gamer/fields.py +++ b/yt/frontends/gamer/fields.py @@ -56,13 +56,13 @@ def setup_fluid_fields(self): # velocity def velocity_xyz(v): def _velocity(field, data): - return data["gas", "momentum_%s" % v] / data["gas", "density"] + return data["gas", f"momentum_{v}"] / data["gas", "density"] return _velocity for v in "xyz": self.add_field( - ("gas", "velocity_%s" % v), + ("gas", f"velocity_{v}"), sampling_type="cell", function=velocity_xyz(v), units=unit_system["velocity"], @@ -162,7 +162,7 @@ def _temperature(field, data): # magnetic field aliases --> magnetic_field_x/y/z if self.ds.mhd: - setup_magnetic_field_aliases(self, "gamer", ["CCMag%s" % v for v in "XYZ"]) + setup_magnetic_field_aliases(self, "gamer", [f"CCMag{v}" for v in "XYZ"]) def setup_particle_fields(self, ptype): super(GAMERFieldInfo, self).setup_particle_fields(ptype) diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index 859b5b7e7b9..33f7a7b8389 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -185,7 +185,7 @@ def _set_code_unit_attributes(self): # This should be improved. 
h5f = h5py.File(self.parameter_filename, mode="r") for field_name in h5f["/field_types"]: - current_field = h5f["/field_types/%s" % field_name] + current_field = h5f[f"/field_types/{field_name}"] if "field_to_cgs" in current_field.attrs: field_conv = current_field.attrs["field_to_cgs"] self.field_units[field_name] = just_one(field_conv) @@ -201,7 +201,7 @@ def _set_code_unit_attributes(self): if "dataset_units" in h5f: for unit_name in h5f["/dataset_units"]: - current_unit = h5f["/dataset_units/%s" % unit_name] + current_unit = h5f[f"/dataset_units/{unit_name}"] value = current_unit.value unit = current_unit.attrs["unit"] # need to convert to a Unit object and check dimensions diff --git a/yt/frontends/gdf/fields.py b/yt/frontends/gdf/fields.py index 46a917daa7f..f9a36f4b099 100644 --- a/yt/frontends/gdf/fields.py +++ b/yt/frontends/gdf/fields.py @@ -23,5 +23,5 @@ def setup_fluid_fields(self): from yt.fields.magnetic_field import setup_magnetic_field_aliases setup_magnetic_field_aliases( - self, "gdf", ["magnetic_field_%s" % ax for ax in "xyz"] + self, "gdf", [f"magnetic_field_{ax}" for ax in "xyz"] ) diff --git a/yt/frontends/gdf/io.py b/yt/frontends/gdf/io.py index df6820b0393..be5ac4a9731 100644 --- a/yt/frontends/gdf/io.py +++ b/yt/frontends/gdf/io.py @@ -11,7 +11,7 @@ def _grid_dname(grid_id): def _field_dname(grid_id, field_name): - return "%s/%s" % (_grid_dname(grid_id), field_name) + return f"{_grid_dname(grid_id)}/{field_name}" # TODO all particle bits were removed diff --git a/yt/frontends/gizmo/fields.py b/yt/frontends/gizmo/fields.py index 3dc6d632cbd..09ab0a15043 100644 --- a/yt/frontends/gizmo/fields.py +++ b/yt/frontends/gizmo/fields.py @@ -95,11 +95,11 @@ def _h_p1_density(field, data): def _nuclei_mass_density_field(field, data): species = field.name[1][: field.name[1].find("_")] - return data[ptype, "density"] * data[ptype, "%s_metallicity" % species] + return data[ptype, "density"] * data[ptype, f"{species}_metallicity"] for species in ["H", "H_p0", "H_p1"]: for suf in ["_density", "_number_density"]: - field = "%s%s" % (species, suf) + field = f"{species}{suf}" self.alias(("gas", field), (ptype, field)) if (ptype, "ElectronAbundance") in self.field_list: @@ -119,14 +119,14 @@ def _el_number_density(field, data): for species in self.nuclei_names: self.add_field( - (ptype, "%s_nuclei_mass_density" % species), + (ptype, f"{species}_nuclei_mass_density"), sampling_type="particle", function=_nuclei_mass_density_field, units=self.ds.unit_system["density"], ) for suf in ["_nuclei_mass_density", "_metallicity"]: - field = "%s%s" % (species, suf) + field = f"{species}{suf}" self.alias(("gas", field), (ptype, field)) def _metal_density_field(field, data): diff --git a/yt/frontends/gizmo/tests/test_outputs.py b/yt/frontends/gizmo/tests/test_outputs.py index 12c49dfd8a7..312544a9045 100644 --- a/yt/frontends/gizmo/tests/test_outputs.py +++ b/yt/frontends/gizmo/tests/test_outputs.py @@ -65,11 +65,11 @@ def test_gas_particle_fields(): # Add species fields for species in ["H_p0", "H_p1"]: for suffix in ["density", "fraction", "mass", "number_density"]: - derived_fields += ["%s_%s" % (species, suffix)] + derived_fields += [f"{species}_{suffix}"] for species in metal_elements: - derived_fields += ["%s_nuclei_mass_density" % species] + derived_fields += [f"{species}_nuclei_mass_density"] # Add magnetic fields - derived_fields += ["particle_magnetic_field_%s" % axis for axis in "xyz"] + derived_fields += [f"particle_magnetic_field_{axis}" for axis in "xyz"] # Check for field in 
derived_fields: assert (ptype, field) in ds.derived_field_list @@ -78,11 +78,11 @@ def test_gas_particle_fields(): derived_fields = [] for species in ["H_p0", "H_p1"]: for suffix in ["density", "number_density"]: - derived_fields += ["%s_%s" % (species, suffix)] + derived_fields += [f"{species}_{suffix}"] for species in metal_elements: for suffix in ["nuclei_mass_density", "metallicity"]: - derived_fields += ["%s_%s" % (species, suffix)] - derived_fields += ["magnetic_field_%s" % axis for axis in "xyz"] + derived_fields += [f"{species}_{suffix}"] + derived_fields += [f"magnetic_field_{axis}" for axis in "xyz"] for field in derived_fields: assert (ptype, field) in ds.derived_field_list diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 5919b95ee77..5984121ed3a 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -85,7 +85,7 @@ def _read_particle_positions(self, ptype, f=None): pcount = self.header["num_halos"] pos = np.empty((pcount, 3), dtype="float64") for i, ax in enumerate("xyz"): - pos[:, i] = f["particle_position_%s" % ax][()] + pos[:, i] = f[f"particle_position_{ax}"][()] if close: f.close() @@ -129,7 +129,7 @@ def _parse_parameter_file(self): self.domain_dimensions = np.ones(self.dimensionality, "int32") self.periodicity = (True, True, True) prefix = ".".join(self.parameter_filename.rsplit(".", 2)[:-2]) - self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix) + self.filename_template = f"{prefix}.%(num)s{self._suffix}" self.file_count = len(glob.glob(prefix + "*" + self._suffix)) self.particle_types = "halos" self.particle_types_raw = "halos" diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index 75383a43abf..2a7a53816f5 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -33,8 +33,7 @@ def test_halo_catalog(self): rs = np.random.RandomState(3670474) n_halos = 100 fields = [ - "particle_%s" % name - for name in ["mass"] + ["position_%s" % ax for ax in "xyz"] + f"particle_{name}" for name in ["mass"] + [f"position_{ax}" for ax in "xyz"] ] units = ["g"] + ["cm"] * 3 data = dict( @@ -59,8 +58,7 @@ def test_halo_catalog_boundary_particles(self): rs = np.random.RandomState(3670474) n_halos = 100 fields = [ - "particle_%s" % name - for name in ["mass"] + ["position_%s" % ax for ax in "xyz"] + f"particle_{name}" for name in ["mass"] + [f"position_{ax}" for ax in "xyz"] ] units = ["g"] + ["cm"] * 3 data = dict( diff --git a/yt/frontends/http_stream/io.py b/yt/frontends/http_stream/io.py index 6f5c92917ff..61758318f38 100644 --- a/yt/frontends/http_stream/io.py +++ b/yt/frontends/http_stream/io.py @@ -20,7 +20,7 @@ def __init__(self, ds): def _open_stream(self, data_file, field): # This does not actually stream yet! 
ftype, fname = field - s = "%s/%s/%s/%s" % (self._url, data_file.file_id, ftype, fname) + s = f"{self._url}/{data_file.file_id}/{ftype}/{fname}" mylog.info("Loading URL %s", s) requests = get_requests() resp = requests.get(s) diff --git a/yt/frontends/moab/io.py b/yt/frontends/moab/io.py index ab0488ebf9b..435bdc70adc 100644 --- a/yt/frontends/moab/io.py +++ b/yt/frontends/moab/io.py @@ -5,7 +5,7 @@ def field_dname(field_name): - return "/tstt/elements/Hex8/tags/{0}".format(field_name) + return f"/tstt/elements/Hex8/tags/{field_name}" # TODO all particle bits were removed diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index b63ce96da4b..d910b6b70db 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -505,7 +505,7 @@ def _set_paths(self, handle, path, iteration): if "groupBased" in encoding and len(iterations) > 1: mylog.warning("Only chose to load one iteration (%s)", iteration) - self.base_path = "/data/{}/".format(iteration) + self.base_path = f"/data/{iteration}/" try: self.meshes_path = self._handle["/"].attrs["meshesPath"].decode() handle[self.base_path + self.meshes_path] @@ -654,7 +654,7 @@ def __getitem__(self, key): self._setup_function(o) return o else: - raise KeyError("Unknown iteration {}".format(key)) + raise KeyError(f"Unknown iteration {key}") def _load(self, it, **kwargs): return OpenPMDDataset(self.filename, iteration=it) diff --git a/yt/frontends/open_pmd/fields.py b/yt/frontends/open_pmd/fields.py index 1a7b69622e6..7063483ba24 100644 --- a/yt/frontends/open_pmd/fields.py +++ b/yt/frontends/open_pmd/fields.py @@ -33,7 +33,7 @@ def poynting(field, data): for ax in "xyz": self.add_field( - ("openPMD", "poynting_vector_%s" % ax), + ("openPMD", f"poynting_vector_{ax}"), sampling_type="cell", function=_get_poyn(ax), units="W/m**2", @@ -65,7 +65,7 @@ def setup_velocity(self, ptype): def _get_vel(axis): def velocity(field, data): c = speed_of_light - momentum = data[ptype, "particle_momentum_{}".format(axis)] + momentum = data[ptype, f"particle_momentum_{axis}"] mass = data[ptype, "particle_mass"] weighting = data[ptype, "particle_weighting"] return momentum / np.sqrt( @@ -76,7 +76,7 @@ def velocity(field, data): for ax in "xyz": self.add_field( - (ptype, "particle_velocity_%s" % ax), + (ptype, f"particle_velocity_{ax}"), sampling_type="particle", function=_get_vel(ax), units="m/s", @@ -87,15 +87,15 @@ def setup_absolute_positions(self, ptype): def _abs_pos(axis): def ap(field, data): return np.add( - data[ptype, "particle_positionCoarse_{}".format(axis)], - data[ptype, "particle_positionOffset_{}".format(axis)], + data[ptype, f"particle_positionCoarse_{axis}"], + data[ptype, f"particle_positionOffset_{axis}"], ) return ap for ax in "xyz": self.add_field( - (ptype, "particle_position_%s" % ax), + (ptype, f"particle_position_{ax}"), sampling_type="particle", function=_abs_pos(ax), units="m", diff --git a/yt/frontends/open_pmd/misc.py b/yt/frontends/open_pmd/misc.py index b0d41368146..3435add43ab 100644 --- a/yt/frontends/open_pmd/misc.py +++ b/yt/frontends/open_pmd/misc.py @@ -44,7 +44,7 @@ def parse_unit_dimension(unit_dimension): si = ["m", "kg", "s", "A", "C", "mol", "cd"] for i in np.arange(7): if unit_dimension[i] != 0: - dim.append("{}**{}".format(si[i], unit_dimension[i])) + dim.append(f"{si[i]}**{unit_dimension[i]}") return "*".join(dim) diff --git a/yt/frontends/owls/simulation_handling.py b/yt/frontends/owls/simulation_handling.py index 370079b38ee..ecc1b1abe92 100644 
--- a/yt/frontends/owls/simulation_handling.py +++ b/yt/frontends/owls/simulation_handling.py @@ -56,6 +56,6 @@ def _snapshot_format(self, index=None): count = "*" else: count = "%03d" % index - keyword = "%s_%s" % (self.parameters["SnapshotFileBase"], count) - filename = os.path.join(keyword, "%s%s" % (keyword, suffix)) + keyword = f"{self.parameters['SnapshotFileBase']}_{count}" + filename = os.path.join(keyword, f"{keyword}{suffix}") return os.path.join(data_dir, filename) diff --git a/yt/frontends/owls_subfind/data_structures.py b/yt/frontends/owls_subfind/data_structures.py index c6cd1fd8b62..7909feed510 100644 --- a/yt/frontends/owls_subfind/data_structures.py +++ b/yt/frontends/owls_subfind/data_structures.py @@ -132,7 +132,7 @@ def _parse_parameter_file(self): ) suffix = self.parameter_filename.rsplit(".", 1)[-1] - self.filename_template = "%s.%%(num)i.%s" % (prefix, suffix) + self.filename_template = f"{prefix}.%(num)i.{suffix}" self.file_count = len(glob.glob(prefix + "*" + self._suffix)) if self.file_count == 0: raise YTException(message="No data files found.", ds=self) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index c723de81c1b..4be5ba45766 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -38,7 +38,7 @@ def __init__(self, ds, domain_id): rootdir = ds.root_folder basedir = os.path.abspath(os.path.dirname(ds.parameter_filename)) basename = "%s/%%s_%s.out%05i" % (basedir, num, domain_id) - part_file_descriptor = "%s/part_file_descriptor.txt" % basedir + part_file_descriptor = f"{basedir}/part_file_descriptor.txt" if ds.num_groups > 0: igroup = ((domain_id - 1) // ds.group_size) + 1 basename = "%s/group_%05i/%%s_%s.out%05i" % ( @@ -50,7 +50,7 @@ def __init__(self, ds, domain_id): else: basename = "%s/%%s_%s.out%05i" % (basedir, num, domain_id) for t in ["grav", "amr"]: - setattr(self, "%s_fn" % t, basename % t) + setattr(self, f"{t}_fn", basename % t) self._part_file_descriptor = part_file_descriptor self._read_amr_header() @@ -485,7 +485,7 @@ def print_stats(self): header = "%3s\t%14s\t%14s" % ("level", "# cells", "# cells^3") print(header) - print("%s" % (len(header.expandtabs()) * "-")) + print(f"{len(header.expandtabs()) * '-'}") for level in range(self.dataset.min_level + self.dataset.max_level + 2): print( "% 3i\t% 14i\t% 14i" @@ -662,7 +662,7 @@ def closure(pfilter, data): for k in particle_families.keys(): mylog.info("Adding particle_type: %s", k) - self.add_particle_filter("%s" % k) + self.add_particle_filter(f"{k}") def __repr__(self): return self.basename.rsplit(".", 1)[0] @@ -820,7 +820,7 @@ def read_namelist(self): with open(namelist_file, "r") as f: nml = f90nml.read(f) except ImportError as e: - nml = "An error occurred when reading the namelist: %s" % str(e) + nml = f"An error occurred when reading the namelist: {str(e)}" except (ValueError, StopIteration) as e: mylog.warning( "Could not parse `namelist.txt` file as it was malformed: %s", e diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index b20ad4f94c8..f46181160ac 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -80,7 +80,7 @@ def __init__(self, domain): igroup = ((domain.domain_id - 1) // ds.group_size) + 1 full_path = os.path.join( basename, - "group_{:05d}".format(igroup), + f"group_{igroup:05d}", self.fname.format(iout=iout, icpu=domain.domain_id), ) else: @@ -92,8 +92,7 @@ def __init__(self, domain): self.fname = 
full_path else: raise FileNotFoundError( - "Could not find fluid file (type: %s). Tried %s" - % (self.ftype, full_path) + f"Could not find fluid file (type: {self.ftype}). Tried {full_path}" ) if self.file_descriptor is not None: @@ -442,10 +441,10 @@ def detect_fields(cls, ds): ndim = ds.dimensionality if nvar == ndim + 1: - fields = ["potential"] + ["%s-acceleration" % k for k in "xyz"[:ndim]] + fields = ["potential"] + [f"{k}-acceleration" for k in "xyz"[:ndim]] ndetected = ndim else: - fields = ["%s-acceleration" % k for k in "xyz"[:ndim]] + fields = [f"{k}-acceleration" for k in "xyz"[:ndim]] ndetected = ndim if ndetected != nvar and not ds._warned_extra_fields["gravity"]: @@ -453,7 +452,7 @@ def detect_fields(cls, ds): ds._warned_extra_fields["gravity"] = True for i in range(nvar - ndetected): - fields.append("var%s" % i) + fields.append(f"var{i}") cls.field_list = [(cls.ftype, e) for e in fields] diff --git a/yt/frontends/ramses/fields.py b/yt/frontends/ramses/fields.py index 8185ad5c03a..bbea80c2d0a 100644 --- a/yt/frontends/ramses/fields.py +++ b/yt/frontends/ramses/fields.py @@ -211,15 +211,15 @@ def create_magnetic_fields(self): def mag_field(ax): def _mag_field(field, data): return ( - data["magnetic_field_%s_left" % ax] - + data["magnetic_field_%s_right" % ax] + data[f"magnetic_field_{ax}_left"] + + data[f"magnetic_field_{ax}_right"] ) / 2 return _mag_field for ax in self.ds.coordinates.axis_order: self.add_field( - ("gas", "magnetic_field_%s" % ax), + ("gas", f"magnetic_field_{ax}"), sampling_type="cell", function=mag_field(ax), units=self.ds.unit_system["magnetic_field_cgs"], @@ -230,8 +230,8 @@ def _divB(field, data): out = np.zeros_like(data["magnetic_field_x_right"]) for ax in data.ds.coordinates.axis_order: out += ( - data["magnetic_field_%s_right" % ax] - - data["magnetic_field_%s_left" % ax] + data[f"magnetic_field_{ax}_right"] + - data[f"magnetic_field_{ax}_left"] ) return out / data["dx"] @@ -290,14 +290,14 @@ def _species_mass(field, data): # Adding the fields in the rt_ files def gen_pdens(igroup): def _photon_density(field, data): - rv = data["ramses-rt", "Photon_density_%s" % (igroup + 1)] * dens_conv + rv = data["ramses-rt", f"Photon_density_{igroup + 1}"] * dens_conv return rv return _photon_density for igroup in range(ngroups): self.add_field( - ("rt", "photon_density_%s" % (igroup + 1)), + ("rt", f"photon_density_{igroup + 1}"), sampling_type="cell", function=gen_pdens(igroup), units=self.ds.unit_system["number_density"], @@ -307,10 +307,7 @@ def _photon_density(field, data): def gen_flux(key, igroup): def _photon_flux(field, data): - rv = ( - data["ramses-rt", "Photon_flux_%s_%s" % (key, igroup + 1)] - * flux_conv - ) + rv = data["ramses-rt", f"Photon_flux_{key}_{igroup + 1}"] * flux_conv return rv return _photon_flux @@ -321,7 +318,7 @@ def _photon_flux(field, data): for key in "xyz": for igroup in range(ngroups): self.add_field( - ("rt", "photon_flux_%s_%s" % (key, igroup + 1)), + ("rt", f"photon_flux_{key}_{igroup + 1}"), sampling_type="cell", function=gen_flux(key, igroup), units=flux_unit, diff --git a/yt/frontends/ramses/io.py b/yt/frontends/ramses/io.py index 3e06a48273f..539c167d383 100644 --- a/yt/frontends/ramses/io.py +++ b/yt/frontends/ramses/io.py @@ -133,7 +133,7 @@ def _read_fluid_selection(self, chunks, selector, fields, size): def _read_particle_coords(self, chunks, ptf): pn = "particle_position_%s" fields = [ - (ptype, "particle_position_%s" % ax) + (ptype, f"particle_position_{ax}") for ptype, field_list in ptf.items() for ax in "xyz" ] @@ 
-256,7 +256,7 @@ def _read_part_file_descriptor(fname): if varname in mapping: varname = mapping[varname] else: - varname = "particle_%s" % varname + varname = f"particle_{varname}" fields.append((varname, dtype)) else: @@ -284,9 +284,7 @@ def _read_fluid_file_descriptor(fname): mapping += [ (key, key) for key in ( - "B_{0}_{1}".format(dim, side) - for side in ["left", "right"] - for dim in ["x", "y", "z"] + f"B_{dim}_{side}" for side in ["left", "right"] for dim in ["x", "y", "z"] ) ] @@ -318,7 +316,7 @@ def _read_fluid_file_descriptor(fname): if varname in mapping: varname = mapping[varname] else: - varname = "hydro_%s" % varname + varname = f"hydro_{varname}" fields.append((varname, dtype)) else: diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 2e11bd4e87a..5223ebf64d2 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -76,7 +76,7 @@ def __init__(self, ds, domain): igroup = ((domain.domain_id - 1) // ds.group_size) + 1 full_path = os.path.join( basename, - "group_{:05d}".format(igroup), + f"group_{igroup:05d}", self.fname.format(iout=iout, icpu=domain.domain_id), ) else: @@ -88,8 +88,7 @@ def __init__(self, ds, domain): self.fname = full_path else: raise FileNotFoundError( - "Could not find particle file (type: %s). Tried %s" - % (self.ptype, full_path) + f"Could not find particle file (type: {self.ptype}). Tried {full_path}" ) if self.file_descriptor is not None: @@ -353,7 +352,7 @@ def read_header(self): # Note: this follows RAMSES convention. for i in range(self.ds.dimensionality * 2 + 1): for ilvl in range(self.ds.max_level + 1): - fields.append(("particle_prop_%s_%s" % (ilvl, i), "d")) + fields.append((f"particle_prop_{ilvl}_{i}", "d")) field_offsets = {} _pfields = {} diff --git a/yt/frontends/ramses/tests/test_outputs.py b/yt/frontends/ramses/tests/test_outputs.py index 99f86723069..20bd6edeba8 100644 --- a/yt/frontends/ramses/tests/test_outputs.py +++ b/yt/frontends/ramses/tests/test_outputs.py @@ -139,7 +139,7 @@ def test_extra_fields(): @requires_file(ramsesExtraFieldsSmall) def test_extra_fields_2(): - extra_fields = ["particle_extra_field_%s" % (i + 1) for i in range(2)] + extra_fields = [f"particle_extra_field_{i + 1}" for i in range(2)] ds = yt.load(os.path.join(ramsesExtraFieldsSmall, "info_00001.txt")) # the dataset should contain the fields @@ -356,12 +356,12 @@ def test_grav_detection(): # Test detection for k in "xyz": - assert ("gravity", "%s-acceleration" % k) in ds.field_list - assert ("gas", "acceleration_%s" % k) in ds.derived_field_list + assert ("gravity", f"{k}-acceleration") in ds.field_list + assert ("gas", f"acceleration_{k}") in ds.derived_field_list # Test access for k in "xyz": - ds.r["gas", "acceleration_%s" % k] + ds.r["gas", f"acceleration_{k}"] @requires_file(ramses_sink) diff --git a/yt/frontends/rockstar/data_structures.py b/yt/frontends/rockstar/data_structures.py index 4fd4f5ddf23..aa6ecdca061 100644 --- a/yt/frontends/rockstar/data_structures.py +++ b/yt/frontends/rockstar/data_structures.py @@ -40,7 +40,7 @@ def _read_particle_positions(self, ptype, f=None): f.seek(self._position_offset, os.SEEK_SET) halos = np.fromfile(f, dtype=self.io._halo_dt, count=pcount) for i, ax in enumerate("xyz"): - pos[:, i] = halos["particle_position_%s" % ax].astype("float64") + pos[:, i] = halos[f"particle_position_{ax}"].astype("float64") if close: f.close() @@ -77,7 +77,7 @@ def _parse_parameter_file(self): self.dimensionality = 3 self.refine_by = 2 prefix 
= ".".join(self.parameter_filename.rsplit(".", 2)[:-2]) - self.filename_template = "%s.%%(num)s%s" % (prefix, self._suffix) + self.filename_template = f"{prefix}.%(num)s{self._suffix}" self.file_count = len(glob.glob(prefix + ".*" + self._suffix)) # Now we can set up things we already know. diff --git a/yt/frontends/sdf/data_structures.py b/yt/frontends/sdf/data_structures.py index 0466a4556bb..cccaf8e97e8 100644 --- a/yt/frontends/sdf/data_structures.py +++ b/yt/frontends/sdf/data_structures.py @@ -105,16 +105,16 @@ def _parse_parameter_file(self): if "offset_center" in self.parameters and self.parameters["offset_center"]: self.domain_left_edge = np.array([0, 0, 0], dtype=np.float64) self.domain_right_edge = np.array( - [2.0 * self.parameters.get("R%s" % ax, R0) for ax in "xyz"], + [2.0 * self.parameters.get(f"R{ax}", R0) for ax in "xyz"], dtype=np.float64, ) else: self.domain_left_edge = np.array( - [-self.parameters.get("R%s" % ax, R0) for ax in "xyz"], + [-self.parameters.get(f"R{ax}", R0) for ax in "xyz"], dtype=np.float64, ) self.domain_right_edge = np.array( - [+self.parameters.get("R%s" % ax, R0) for ax in "xyz"], + [+self.parameters.get(f"R{ax}", R0) for ax in "xyz"], dtype=np.float64, ) self.domain_left_edge *= self.parameters.get("a", 1.0) diff --git a/yt/frontends/sph/data_structures.py b/yt/frontends/sph/data_structures.py index ac292fbac05..01a84c3d4d1 100644 --- a/yt/frontends/sph/data_structures.py +++ b/yt/frontends/sph/data_structures.py @@ -48,7 +48,7 @@ def num_neighbors(self): @num_neighbors.setter def num_neighbors(self, value): if value < 0: - raise ValueError("Negative value not allowed: %s" % value) + raise ValueError(f"Negative value not allowed: {value}") self._num_neighbors = value @property diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 1270658d5a9..ac71ef66a64 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -284,7 +284,7 @@ def __init__( self.geometry = geometry self.stream_handler = stream_handler self._find_particle_types() - name = "InMemoryParameterFile_%s" % uuid.uuid4().hex + name = f"InMemoryParameterFile_{uuid.uuid4().hex}" from yt.data_objects.static_output import _cached_datasets _cached_datasets[name] = self @@ -342,7 +342,7 @@ def _set_code_unit_attributes(self): elif isinstance(unit, tuple): uq = self.quan(unit[0], unit[1]) else: - raise RuntimeError("%s (%s) is invalid." 
% (attr, unit)) + raise RuntimeError(f"{attr} ({unit}) is invalid.") setattr(self, attr, uq) @classmethod @@ -441,7 +441,7 @@ def assign_particle_data(ds, pdata, bbox): for ptype in ds.particle_types_raw: if (ptype, "particle_position_x") in pdata: - x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in "xyz") + x, y, z = (pdata[ptype, f"particle_position_{ax}"] for ax in "xyz") elif (ptype, "particle_position") in pdata: x, y, z = pdata[ptype, "particle_position"].T else: diff --git a/yt/frontends/stream/fields.py b/yt/frontends/stream/fields.py index 690815ebf86..532009e55f5 100644 --- a/yt/frontends/stream/fields.py +++ b/yt/frontends/stream/fields.py @@ -88,7 +88,7 @@ def setup_fluid_fields(self): if units != "": self.add_output_field(field, sampling_type="cell", units=units) setup_magnetic_field_aliases( - self, "stream", ["magnetic_field_%s" % ax for ax in "xyz"] + self, "stream", [f"magnetic_field_{ax}" for ax in "xyz"] ) def add_output_field(self, name, sampling_type, **kwargs): diff --git a/yt/frontends/stream/io.py b/yt/frontends/stream/io.py index 0738292fc84..626ade00278 100644 --- a/yt/frontends/stream/io.py +++ b/yt/frontends/stream/io.py @@ -62,9 +62,7 @@ def _read_particle_coords(self, chunks, ptf): if (ptype, "particle_position") in gf: x, y, z = gf[ptype, "particle_position"].T else: - x, y, z = ( - gf[ptype, "particle_position_%s" % ax] for ax in "xyz" - ) + x, y, z = (gf[ptype, f"particle_position_{ax}"] for ax in "xyz") yield ptype, (x, y, z) def _read_particle_fields(self, chunks, ptf, selector): @@ -78,9 +76,7 @@ def _read_particle_fields(self, chunks, ptf, selector): if (ptype, "particle_position") in gf: x, y, z = gf[ptype, "particle_position"].T else: - x, y, z = ( - gf[ptype, "particle_position_%s" % ax] for ax in "xyz" - ) + x, y, z = (gf[ptype, f"particle_position_{ax}"] for ax in "xyz") mask = selector.select_points(x, y, z, 0.0) if mask is None: continue @@ -151,7 +147,7 @@ def _read_particle_fields(self, chunks, ptf, selector): y = ppos[:, 1] z = ppos[:, 2] else: - x, y, z = (f[ptype, "particle_position_%s" % ax] for ax in "xyz") + x, y, z = (f[ptype, f"particle_position_{ax}"] for ax in "xyz") if (ptype, "smoothing_length") in self.ds.field_list: hsml = f[ptype, "smoothing_length"] else: @@ -172,7 +168,7 @@ def _yield_coordinates(self, data_file, needed_ptype=None): pos = np.column_stack( [ self.fields[data_file.filename][ - (ptype, "particle_position_%s" % ax) + (ptype, f"particle_position_{ax}") ] for ax in "xyz" ] diff --git a/yt/frontends/stream/tests/test_stream_particles.py b/yt/frontends/stream/tests/test_stream_particles.py index 138d6f34b5c..8ec84d08145 100644 --- a/yt/frontends/stream/tests/test_stream_particles.py +++ b/yt/frontends/stream/tests/test_stream_particles.py @@ -356,9 +356,9 @@ def test_load_particles_types(): dd = ds1.all_data() for ax in "xyz": - assert dd["io", "particle_position_%s" % ax].size == num_particles - assert dd["all", "particle_position_%s" % ax].size == num_particles - assert dd["nbody", "particle_position_%s" % ax].size == num_particles + assert dd["io", f"particle_position_{ax}"].size == num_particles + assert dd["all", f"particle_position_{ax}"].size == num_particles + assert dd["nbody", f"particle_position_{ax}"].size == num_particles num_dm_particles = 10000 num_star_particles = 50000 @@ -385,9 +385,9 @@ def test_load_particles_types(): for ax in "xyz": npart = 0 for ptype in ds2.particle_types_raw: - npart += dd[ptype, "particle_position_%s" % ax].size + npart += dd[ptype, f"particle_position_{ax}"].size 
assert npart == num_tot_particles - assert dd["all", "particle_position_%s" % ax].size == num_tot_particles + assert dd["all", f"particle_position_{ax}"].size == num_tot_particles def test_load_particles_with_data_source(): @@ -396,7 +396,7 @@ def test_load_particles_with_data_source(): # Load from dataset ad = ds1.all_data() fields = ["particle_mass"] - fields += ["particle_position_{}".format(ax) for ax in "xyz"] + fields += [f"particle_position_{ax}" for ax in "xyz"] data = {field: ad[field] for field in fields} ds2 = load_particles(data, data_source=ad) diff --git a/yt/frontends/swift/io.py b/yt/frontends/swift/io.py index b0582375a8c..03fea0116a5 100644 --- a/yt/frontends/swift/io.py +++ b/yt/frontends/swift/io.py @@ -33,7 +33,7 @@ def _read_particle_coords(self, chunks, ptf): for ptype in sorted(ptf): if sub_file.total_particles[ptype] == 0: continue - pos = f["/%s/Coordinates" % ptype][si:ei, :] + pos = f[f"/{ptype}/Coordinates"][si:ei, :] pos = pos.astype("float64", copy=False) if ptype == self.ds._sph_ptypes[0]: hsml = self._get_smoothing_length(sub_file) @@ -88,7 +88,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for ptype, field_list in sorted(ptf.items()): if sub_file.total_particles[ptype] == 0: continue - g = f["/%s" % ptype] + g = f[f"/{ptype}"] # this should load as float64 coords = g["Coordinates"][si:ei] if ptype == "PartType0": @@ -120,7 +120,7 @@ def _count_particles(self, data_file): # defined by the subfile if None not in (si, ei): np.clip(pcount - si, 0, ei - si, out=pcount) - npart = dict(("PartType%s" % (i), v) for i, v in enumerate(pcount)) + npart = dict((f"PartType{i}", v) for i, v in enumerate(pcount)) return npart def _identify_fields(self, data_file): diff --git a/yt/frontends/tipsy/data_structures.py b/yt/frontends/tipsy/data_structures.py index cf1f2f8db43..46fbe6aa289 100644 --- a/yt/frontends/tipsy/data_structures.py +++ b/yt/frontends/tipsy/data_structures.py @@ -265,7 +265,7 @@ def _set_code_unit_attributes(self): if isinstance(my_val, tuple) else self.quan(my_val) ) - setattr(self, "%s_unit" % my_unit, my_val) + setattr(self, f"{my_unit}_unit", my_val) # Finally, set the dependent units if self.cosmological_simulation: diff --git a/yt/frontends/tipsy/io.py b/yt/frontends/tipsy/io.py index 6415850b383..416addc5e4f 100644 --- a/yt/frontends/tipsy/io.py +++ b/yt/frontends/tipsy/io.py @@ -115,7 +115,7 @@ def _read_particle_coords(self, chunks, ptf): @property def hsml_filename(self): - return "%s-%s" % (self.ds.parameter_filename, "hsml") + return f"{self.ds.parameter_filename}-{'hsml'}" def _generate_smoothing_length(self, data_files, kdtree): if os.path.exists(self.hsml_filename): @@ -330,7 +330,7 @@ def _compute_dtypes(cls, field_dtypes, endian="<"): pds = {} for ptype, field in cls._fields: dtbase = field_dtypes.get(field, "f") - ff = "%s%s" % (endian, dtbase) + ff = f"{endian}{dtbase}" if field in cls._vector_fields: dt = (field, [("x", ff), ("y", ff), ("z", ff)]) else: diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index 57bd7c1673a..98f8d0a331c 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -92,7 +92,7 @@ def _parse_parameter_file(self): # assign units to parameters that have associated unit string del_pars = [] for par in self.parameters: - ustr = "%s_units" % par + ustr = f"{par}_units" if ustr in self.parameters: if isinstance(self.parameters[par], np.ndarray): to_u = self.arr @@ -136,12 +136,10 @@ def _set_code_unit_attributes(self): 
self.parameters[attr], YTQuantity ): uq = self.parameters[attr] - elif attr in self.parameters and "%s_units" % attr in self.parameters: - uq = self.quan( - self.parameters[attr], self.parameters["%s_units" % attr] - ) + elif attr in self.parameters and f"{attr}_units" in self.parameters: + uq = self.quan(self.parameters[attr], self.parameters[f"{attr}_units"]) del self.parameters[attr] - del self.parameters["%s_units" % attr] + del self.parameters[f"{attr}_units"] elif isinstance(unit, str): uq = self.quan(1.0, unit) elif isinstance(unit, numeric_type): @@ -151,7 +149,7 @@ def _set_code_unit_attributes(self): elif isinstance(unit, tuple): uq = self.quan(unit[0], unit[1]) else: - raise RuntimeError("%s (%s) is invalid." % (attr, unit)) + raise RuntimeError(f"{attr} ({unit}) is invalid.") setattr(self, attr, uq) @@ -333,7 +331,7 @@ def _restore_light_ray_solution(self): return self.light_ray_solution = [{} for val in self.parameters[lrs_fields[0]]] for sp3 in ["unique_identifier", "filename"]: - ksp3 = "%s_%s" % (key, sp3) + ksp3 = f"{key}_{sp3}" if ksp3 not in lrs_fields: continue self.parameters[ksp3] = self.parameters[ksp3].astype(str) @@ -817,9 +815,7 @@ def _parse_parameter_file(self): ) for a in ["profile_dimensions"] + [ - "%s_%s" % (ax, attr) - for ax in "xyz"[: self.dimensionality] - for attr in ["log"] + f"{ax}_{attr}" for ax in "xyz"[: self.dimensionality] for attr in ["log"] ]: setattr(self, a, self.parameters[a]) @@ -833,15 +829,15 @@ def _parse_parameter_file(self): domain_left_edge = np.zeros(3) domain_right_edge = np.ones(3) for i, ax in enumerate("xyz"[: self.dimensionality]): - range_name = "%s_range" % ax + range_name = f"{ax}_range" my_range = self.parameters[range_name] - if getattr(self, "%s_log" % ax, False): + if getattr(self, f"{ax}_log", False): my_range = np.log10(my_range) domain_left_edge[i] = my_range[0] domain_right_edge[i] = my_range[1] setattr(self, range_name, self.parameters[range_name]) - bin_field = "%s_field" % ax + bin_field = f"{ax}_field" if ( isinstance(self.parameters[bin_field], str) and self.parameters[bin_field] == "None" @@ -876,7 +872,7 @@ def print_key_parameters(self): if is_root(): mylog.info("YTProfileDataset") for a in ["dimensionality", "profile_dimensions"] + [ - "%s_%s" % (ax, attr) + f"{ax}_{attr}" for ax in "xyz"[: self.dimensionality] for attr in ["field", "range", "log"] ]: @@ -924,7 +920,7 @@ def __getitem__(self, field): return g[f][self.global_id] if self.contour_id == -1: return g[f] - cfield = (f[0], "contours_%s" % self.contour_key.decode("utf-8")) + cfield = (f[0], f"contours_{self.contour_key.decode('utf-8')}") if f[0] == "grid": return g[f][g[cfield] == self.contour_id] return self.parent[f][g[cfield] == self.contour_id] diff --git a/yt/frontends/ytdata/tests/test_outputs.py b/yt/frontends/ytdata/tests/test_outputs.py index ba2bfc6bb7c..f58db17f094 100644 --- a/yt/frontends/ytdata/tests/test_outputs.py +++ b/yt/frontends/ytdata/tests/test_outputs.py @@ -63,7 +63,7 @@ def run(self): return np.array([num_e, avg]) def compare(self, new_result, old_result): - err_msg = "YTData field values for %s not equal." % (self.field,) + err_msg = f"YTData field values for {self.field} not equal." 
if self.decimals is None: assert_equal(new_result, old_result, err_msg=err_msg, verbose=True) else: diff --git a/yt/frontends/ytdata/utilities.py b/yt/frontends/ytdata/utilities.py index 985afb0754d..de0e42213a6 100644 --- a/yt/frontends/ytdata/utilities.py +++ b/yt/frontends/ytdata/utilities.py @@ -227,7 +227,7 @@ def _yt_array_hdf5_attr(fh, attr, val): if val is None: val = "None" if hasattr(val, "units"): - fh.attrs["%s_units" % attr] = str(val.units) + fh.attrs[f"{attr}_units"] = str(val.units) try: fh.attrs[str(attr)] = val # This is raised if no HDF5 equivalent exists. diff --git a/yt/funcs.py b/yt/funcs.py index 6c0365304c3..0e244535b1a 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -159,7 +159,7 @@ def get_memory_usage(subtract_share=False): pagesize = resource.getpagesize() except NameError: return -1024 - status_file = "/proc/%s/statm" % (pid) + status_file = f"/proc/{pid}/statm" if not os.path.isfile(status_file): return -1024 line = open(status_file).read() @@ -267,7 +267,7 @@ def some_really_old_function(...): def run_func(*args, **kwargs): message = "%s has been deprecated and may be removed without notice!" if replacement is not None: - message += " Use %s instead." % replacement + message += f" Use {replacement} instead." warnings.warn( message % func.__name__, VisibleDeprecationWarning, stacklevel=2 ) @@ -510,7 +510,7 @@ def paste_traceback(exc_type, exc, tb): s = s.getvalue() ret = p.pastes.newPaste("pytb", s, None, "", "", True) print() - print("Traceback pasted to http://paste.yt-project.org/show/%s" % (ret)) + print(f"Traceback pasted to http://paste.yt-project.org/show/{ret}") print() @@ -533,7 +533,7 @@ def paste_traceback_detailed(exc_type, exc, tb): ) ret = p.pastes.newPaste("text", s, None, "", "", True) print() - print("Traceback pasted to http://paste.yt-project.org/show/%s" % (ret)) + print(f"Traceback pasted to http://paste.yt-project.org/show/{ret}") print() @@ -581,12 +581,12 @@ def update_git(path): print("update the code. You will have to do this yourself.") print("Here's a set of sample commands:") print("") - print(" $ cd %s" % (path)) + print(f" $ cd {path}") print(" $ git stash") print(" $ git checkout master") print(" $ git pull") print(" $ git stash pop") - print(" $ %s setup.py develop" % (sys.executable)) + print(f" $ {sys.executable} setup.py develop") print("") return 1 if repo.active_branch.name != "master": @@ -594,10 +594,10 @@ def update_git(path): print("update the code. You will have to do this yourself.") print("Here's a set of sample commands:") print("") - print(" $ cd %s" % (path)) + print(f" $ cd {path}") print(" $ git checkout master") print(" $ git pull") - print(" $ %s setup.py develop" % (sys.executable)) + print(f" $ {sys.executable} setup.py develop") print("") return 1 print("Updating the repository") @@ -615,7 +615,7 @@ def update_git(path): master.checkout() remote.pull() new_version = repo.git.rev_parse("HEAD", short=12) - f.write("Updated from %s to %s\n\n" % (old_version, new_version)) + f.write(f"Updated from {old_version} to {new_version}\n\n") rebuild_modules(path, f) print("Updated successfully") @@ -636,9 +636,9 @@ def update_hg(path): print("update the code. 
You will have to do this yourself.") print("Here's a set of sample commands:") print("") - print(" $ cd %s" % (path)) + print(f" $ cd {path}") print(" $ hg up -C yt # This will delete any unsaved changes") - print(" $ %s setup.py develop" % (sys.executable)) + print(f" $ {sys.executable} setup.py develop") print("") return 1 print("Updating the repository") @@ -649,7 +649,7 @@ def update_hg(path): repo.update("master", check=True) else: repo.update("yt", check=True) - f.write("Updated from %s to %s\n\n" % (ident, repo.identify())) + f.write(f"Updated from {ident} to {repo.identify()}\n\n") rebuild_modules(path, f) print("Updated successfully.") @@ -666,7 +666,7 @@ def rebuild_modules(path, f): f.write(stdout.decode("utf-8")) f.write("\n\n") if p.returncode: - print("BROKEN: See %s" % (os.path.join(path, "yt_updater.log"))) + print(f"BROKEN: See {os.path.join(path, 'yt_updater.log')}") sys.exit(1) f.write("Successful!\n") @@ -799,7 +799,7 @@ def http_error_default(self, url, fp, errcode, errmsg, headers): # This code snippet is modified from Georg Brandl def bb_apicall(endpoint, data, use_pass=True): - uri = "https://api.bitbucket.org/1.0/%s/" % endpoint + uri = f"https://api.bitbucket.org/1.0/{endpoint}/" # since bitbucket doesn't return the required WWW-Authenticate header when # making a request without Authorization, we cannot use the standard urllib2 # auth handlers; we have to add the requisite header from the start @@ -809,8 +809,8 @@ def bb_apicall(endpoint, data, use_pass=True): if use_pass: username = input("Bitbucket Username? ") password = getpass.getpass() - upw = "%s:%s" % (username, password) - req.add_header("Authorization", "Basic %s" % base64.b64encode(upw).strip()) + upw = f"{username}:{password}" + req.add_header("Authorization", f"Basic {base64.b64encode(upw).strip()}") return urllib.request.urlopen(req).read() @@ -835,7 +835,7 @@ def get_yt_supp(): print( "$ hg clone http://bitbucket.org/yt_analysis/yt-supplemental/ ", end=" " ) - print("%s" % (supp_path)) + print(f"{supp_path}") print() sys.exit(1) rv = hglib.clone("http://bitbucket.org/yt_analysis/yt-supplemental/", supp_path) @@ -862,7 +862,7 @@ def fix_length(length, ds): if length_valid_tuple and unit_is_string and length_is_number: return YTArray(*length, registry=registry) else: - raise RuntimeError("Length %s is invalid" % str(length)) + raise RuntimeError(f"Length {str(length)} is invalid") @contextlib.contextmanager @@ -996,12 +996,12 @@ def ensure_dir(path): def validate_width_tuple(width): if not iterable(width) or len(width) != 2: - raise YTInvalidWidthError("width (%s) is not a two element tuple" % width) + raise YTInvalidWidthError(f"width ({width}) is not a two element tuple") is_numeric = isinstance(width[0], numeric_type) length_has_units = isinstance(width[0], YTArray) unit_is_string = isinstance(width[1], str) if not is_numeric or length_has_units and unit_is_string: - msg = "width (%s) is invalid. " % str(width) + msg = f"width ({str(width)}) is invalid. " msg += "Valid widths look like this: (12, 'au')" raise YTInvalidWidthError(msg) @@ -1074,7 +1074,7 @@ def _func(*args, **kwargs): # Note we use SyntaxWarning because by default, DeprecationWarning is # not shown. warnings.warn( - "This usage is deprecated. Please use %s instead." % cls.__name__, + f"This usage is deprecated. 
Please use {cls.__name__} instead.", SyntaxWarning, stacklevel=2, ) @@ -1189,14 +1189,13 @@ def get_hash(infile, algorithm="md5", BLOCKSIZE=65536): hasher = getattr(hashlib, algorithm)() except Exception: raise NotImplementedError( - "'%s' not available! Available algorithms: %s" - % (algorithm, hashlib.algorithms) + f"'{algorithm}' not available! Available algorithms: {hashlib.algorithms}" ) filesize = os.path.getsize(infile) iterations = int(float(filesize) / float(BLOCKSIZE)) - pbar = get_pbar("Generating %s hash" % algorithm, iterations) + pbar = get_pbar(f"Generating {algorithm} hash", iterations) iter = 0 with open(infile, "rb") as f: diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 32b8f0d71a4..88d5456781b 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -96,7 +96,7 @@ def setup_fields(self, registry): for axi, ax in enumerate(self.axis_order): f1, f2 = _get_coord_fields(axi) registry.add_field( - ("index", "d%s" % ax), + ("index", f"d{ax}"), sampling_type="cell", function=f1, display_field=False, @@ -104,7 +104,7 @@ def setup_fields(self, registry): ) registry.add_field( - ("index", "path_element_%s" % ax), + ("index", f"path_element_{ax}"), sampling_type="cell", function=f1, display_field=False, @@ -112,7 +112,7 @@ def setup_fields(self, registry): ) registry.add_field( - ("index", "%s" % ax), + ("index", f"{ax}"), sampling_type="cell", function=f2, display_field=False, @@ -121,7 +121,7 @@ def setup_fields(self, registry): f3 = _get_vert_fields(axi) registry.add_field( - ("index", "vertex_%s" % ax), + ("index", f"vertex_{ax}"), sampling_type="cell", function=f3, display_field=False, diff --git a/yt/geometry/coordinates/coordinate_handler.py b/yt/geometry/coordinates/coordinate_handler.py index d55660b516b..e941c02998f 100644 --- a/yt/geometry/coordinates/coordinate_handler.py +++ b/yt/geometry/coordinates/coordinate_handler.py @@ -253,7 +253,7 @@ def sanitize_center(self, center, axis): self.ds.index center = (self.ds.domain_left_edge + self.ds.domain_right_edge) / 2 else: - raise RuntimeError('center keyword "%s" not recognized' % center) + raise RuntimeError(f'center keyword "{center}" not recognized') elif isinstance(center, YTArray): return self.ds.arr(center), self.convert_to_cartesian(center) elif iterable(center): @@ -263,14 +263,14 @@ def sanitize_center(self, center, axis): elif center[0].lower() == "max": v, center = self.ds.find_max(center[1]) else: - raise RuntimeError('center keyword "%s" not recognized' % center) + raise RuntimeError(f'center keyword "{center}" not recognized') center = self.ds.arr(center, "code_length") elif iterable(center[0]) and isinstance(center[1], str): center = self.ds.arr(center[0], center[1]) else: center = self.ds.arr(center, "code_length") else: - raise RuntimeError('center keyword "%s" not recognized' % center) + raise RuntimeError(f'center keyword "{center}" not recognized') # This has to return both a center and a display_center display_center = self.convert_to_cartesian(center) return center, display_center diff --git a/yt/geometry/coordinates/geographic_coordinates.py b/yt/geometry/coordinates/geographic_coordinates.py index f390d610f36..a47bf2c3c3e 100644 --- a/yt/geometry/coordinates/geographic_coordinates.py +++ b/yt/geometry/coordinates/geographic_coordinates.py @@ -80,7 +80,7 @@ def setup_fields(self, registry): f1, f2 = _get_coord_fields(self.axis_id[self.radial_axis]) registry.add_field( 
- ("index", "d%s" % (self.radial_axis,)), + ("index", f"d{self.radial_axis}"), sampling_type="cell", function=f1, display_field=False, @@ -116,10 +116,10 @@ def _SphericalVolume(field, data): registry.alias(("index", "volume"), ("index", "cell_volume")) def _path_radial_axis(field, data): - return data["index", "d%s" % self.radial_axis] + return data["index", f"d{self.radial_axis}"] registry.add_field( - ("index", "path_element_%s" % self.radial_axis), + ("index", f"path_element_{self.radial_axis}"), sampling_type="cell", function=_path_radial_axis, units="code_length", diff --git a/yt/geometry/coordinates/spec_cube_coordinates.py b/yt/geometry/coordinates/spec_cube_coordinates.py index b66f94e5372..072c1ac8f54 100644 --- a/yt/geometry/coordinates/spec_cube_coordinates.py +++ b/yt/geometry/coordinates/spec_cube_coordinates.py @@ -56,7 +56,7 @@ def _length_func(field, data): return _length_func registry.add_field( - ("index", "d%s" % ax), + ("index", f"d{ax}"), sampling_type="cell", function=f1, display_field=False, @@ -64,7 +64,7 @@ def _length_func(field, data): ) registry.add_field( - ("index", "path_element_%s" % ax), + ("index", f"path_element_{ax}"), sampling_type="cell", function=_get_length_func(), display_field=False, @@ -72,7 +72,7 @@ def _length_func(field, data): ) registry.add_field( - ("index", "%s" % ax), + ("index", f"{ax}"), sampling_type="cell", function=f2, display_field=False, diff --git a/yt/geometry/coordinates/tests/test_cartesian_coordinates.py b/yt/geometry/coordinates/tests/test_cartesian_coordinates.py index 14815a83681..d8207a09ea8 100644 --- a/yt/geometry/coordinates/tests/test_cartesian_coordinates.py +++ b/yt/geometry/coordinates/tests/test_cartesian_coordinates.py @@ -16,8 +16,8 @@ def test_cartesian_coordinates(): for i, axis in enumerate(axes): dd = ds.all_data() fi = ("index", axis) - fd = ("index", "d%s" % axis) - fp = ("index", "path_element_%s" % axis) + fd = ("index", f"d{axis}") + fp = ("index", f"path_element_{axis}") ma = np.argmax(dd[fi]) assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i]) mi = np.argmin(dd[fi]) diff --git a/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py b/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py index c58957339c4..c47e06846d5 100644 --- a/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py +++ b/yt/geometry/coordinates/tests/test_cylindrical_coordinates.py @@ -18,7 +18,7 @@ def test_cylindrical_coordinates(): for i, axis in enumerate(axes): dd = ds.all_data() fi = ("index", axis) - fd = ("index", "d%s" % axis) + fd = ("index", f"d{axis}") ma = np.argmax(dd[fi]) assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d) mi = np.argmin(dd[fi]) @@ -43,10 +43,10 @@ def create_image(filename_prefix): fields = ["noise%d" % i for i in range(4)] p = SlicePlot(ds, "z", fields) - p.save("%s_log" % filename_prefix) + p.save(f"{filename_prefix}_log") p.set_log("all", False) - p.save("%s_lin" % filename_prefix) + p.save(f"{filename_prefix}_lin") test = GenericImageTest(ds, create_image, 12) test.prefix = "test_noise_plot_lin" diff --git a/yt/geometry/coordinates/tests/test_geographic_coordinates.py b/yt/geometry/coordinates/tests/test_geographic_coordinates.py index bd57593acfc..d138109eca7 100644 --- a/yt/geometry/coordinates/tests/test_geographic_coordinates.py +++ b/yt/geometry/coordinates/tests/test_geographic_coordinates.py @@ -21,7 +21,7 @@ def test_geographic_coordinates(): for i, axis in enumerate(axes): dd = ds.all_data() fi = ("index", axis) - fd = ("index", 
"d%s" % axis) + fd = ("index", f"d{axis}") ma = np.argmax(dd[fi]) assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d) mi = np.argmin(dd[fi]) @@ -71,7 +71,7 @@ def test_internal_geographic_coordinates(): for i, axis in enumerate(axes): dd = ds.all_data() fi = ("index", axis) - fd = ("index", "d%s" % axis) + fd = ("index", f"d{axis}") ma = np.argmax(dd[fi]) assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d) mi = np.argmin(dd[fi]) diff --git a/yt/geometry/coordinates/tests/test_polar_coordinates.py b/yt/geometry/coordinates/tests/test_polar_coordinates.py index fc8e5110653..378a34adb6c 100644 --- a/yt/geometry/coordinates/tests/test_polar_coordinates.py +++ b/yt/geometry/coordinates/tests/test_polar_coordinates.py @@ -17,7 +17,7 @@ def test_cylindrical_coordinates(): for i, axis in enumerate(axes): dd = ds.all_data() fi = ("index", axis) - fd = ("index", "d%s" % axis) + fd = ("index", f"d{axis}") ma = np.argmax(dd[fi]) assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d) mi = np.argmin(dd[fi]) diff --git a/yt/geometry/coordinates/tests/test_spherical_coordinates.py b/yt/geometry/coordinates/tests/test_spherical_coordinates.py index 8f62ebebe32..3246c34335d 100644 --- a/yt/geometry/coordinates/tests/test_spherical_coordinates.py +++ b/yt/geometry/coordinates/tests/test_spherical_coordinates.py @@ -16,7 +16,7 @@ def test_spherical_coordinates(): for i, axis in enumerate(axes): dd = ds.all_data() fi = ("index", axis) - fd = ("index", "d%s" % axis) + fd = ("index", f"d{axis}") ma = np.argmax(dd[fi]) assert_equal(dd[fi][ma] + dd[fd][ma] / 2.0, ds.domain_right_edge[i].d) mi = np.argmin(dd[fi]) diff --git a/yt/geometry/geometry_handler.py b/yt/geometry/geometry_handler.py index 7763a2cb993..1544fe2ce3e 100644 --- a/yt/geometry/geometry_handler.py +++ b/yt/geometry/geometry_handler.py @@ -61,11 +61,11 @@ def _initialize_data_storage(self): fn = self.ds.storage_filename if fn is None: if os.path.isfile( - os.path.join(self.directory, "%s.yt" % self.ds.unique_identifier) + os.path.join(self.directory, f"{self.ds.unique_identifier}.yt") ): - fn = os.path.join(self.directory, "%s.yt" % self.ds.unique_identifier) + fn = os.path.join(self.directory, f"{self.ds.unique_identifier}.yt") else: - fn = os.path.join(self.directory, "%s.yt" % self.dataset.basename) + fn = os.path.join(self.directory, f"{self.dataset.basename}.yt") dir_to_check = os.path.dirname(fn) if dir_to_check == "": dir_to_check = "." @@ -178,7 +178,7 @@ def get_data(self, node, name): if self._data_file is None: return None if node[0] != "/": - node = "/%s" % node + node = f"/{node}" myGroup = self._data_file["/"] for group in node.split("/"): @@ -189,7 +189,7 @@ def get_data(self, node, name): if name not in myGroup: return None - full_name = "%s/%s" % (node, name) + full_name = f"{node}/{name}" try: return self._data_file[full_name][:] except TypeError: @@ -269,7 +269,7 @@ def _chunk(self, dobj, chunking_style, ngz=0, **kwargs): def cached_property(func): - n = "_%s" % func.__name__ + n = f"_{func.__name__}" def cached_func(self): if self._cache and getattr(self, n, None) is not None: @@ -308,7 +308,7 @@ def __init__( def _accumulate_values(self, method): # We call this generically. It's somewhat slower, since we're doing # costly getattr functions, but this allows us to generalize. 
- mname = "select_%s" % method + mname = f"select_{method}" arrs = [] for obj in self._fast_index or self.objs: f = getattr(obj, mname) diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 87db8ff1538..196acf3e37e 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -215,7 +215,7 @@ def print_stats(self): """ header = "%3s\t%6s\t%14s\t%14s" % ("level", "# grids", "# cells", "# cells^3") print(header) - print("%s" % (len(header.expandtabs()) * "-")) + print(f"{len(header.expandtabs()) * '-'}") for level in range(MAXLEVEL): if (self.level_stats["numgrids"][level]) == 0: continue @@ -444,7 +444,7 @@ def _chunk_io( size = self._grid_chunksize else: raise RuntimeError( - "%s is an invalid value for the 'chunk_sizing' argument." % chunk_sizing + f"{chunk_sizing} is an invalid value for the 'chunk_sizing' argument." ) for fn in sorted(gfiles): gs = gfiles[fn] @@ -464,7 +464,7 @@ def _chunk_io( def _add_mesh_sampling_particle_field(self, deposit_field, ftype, ptype): units = self.ds.field_info[ftype, deposit_field].units take_log = self.ds.field_info[ftype, deposit_field].take_log - field_name = "cell_%s_%s" % (ftype, deposit_field) + field_name = f"cell_{ftype}_{deposit_field}" def _mesh_sampling_particle_field(field, data): pos = data[ptype, "particle_position"] diff --git a/yt/geometry/object_finding_mixin.py b/yt/geometry/object_finding_mixin.py index 4688d7c61d3..6cb7d67fa7c 100644 --- a/yt/geometry/object_finding_mixin.py +++ b/yt/geometry/object_finding_mixin.py @@ -61,8 +61,8 @@ def find_max_cell_location(self, field, finest_levels=3): ) max_val, mx, my, mz = source.quantities["MaxLocation"](field) mylog.info("Max Value is %0.5e at %0.16f %0.16f %0.16f", max_val, mx, my, mz) - self.parameters["Max%sValue" % (field)] = max_val - self.parameters["Max%sPos" % (field)] = "%s" % ((mx, my, mz),) + self.parameters[f"Max{field}Value"] = max_val + self.parameters[f"Max{field}Pos"] = f"{mx, my, mz}" return max_val, np.array((mx, my, mz), dtype="float64") def find_min(self, field): @@ -90,8 +90,8 @@ def find_min(self, field): minGrid.Level, ) self.center = pos - self.parameters["Min%sValue" % (field)] = minVal - self.parameters["Min%sPos" % (field)] = "%s" % (pos) + self.parameters[f"Min{field}Value"] = minVal + self.parameters[f"Min{field}Pos"] = f"{pos}" return minVal, pos def find_point(self, coord): diff --git a/yt/geometry/oct_geometry_handler.py b/yt/geometry/oct_geometry_handler.py index 1901cae1984..62deab8d9f2 100644 --- a/yt/geometry/oct_geometry_handler.py +++ b/yt/geometry/oct_geometry_handler.py @@ -27,7 +27,7 @@ def convert(self, unit): def _add_mesh_sampling_particle_field(self, deposit_field, ftype, ptype): units = self.ds.field_info[ftype, deposit_field].units take_log = self.ds.field_info[ftype, deposit_field].take_log - field_name = "cell_%s_%s" % (ftype, deposit_field) + field_name = f"cell_{ftype}_{deposit_field}" def _cell_index(field, data): # Get the position of the particles diff --git a/yt/geometry/tests/test_particle_octree.py b/yt/geometry/tests/test_particle_octree.py index 0bf46234bbf..dce4a09a973 100644 --- a/yt/geometry/tests/test_particle_octree.py +++ b/yt/geometry/tests/test_particle_octree.py @@ -147,9 +147,7 @@ def FakeBitmap( reg._set_coarse_index_data_file(i) if i != (nfiles - 1): raise RuntimeError( - "There are positions for {} files, but there should be {}.".format( - i + 1, nfiles - ) + f"There are positions for {i + 1} files, but there should be {nfiles}." 
) # Refined index mask = reg.masks.sum(axis=1).astype("uint8") @@ -336,8 +334,8 @@ def test_bitmap_select(): selector = RegionSelector(fr) (df, gf), (dmask, gmask) = reg.identify_data_files(selector, ngz=1) if exact_division: - assert_equal(len(df), 1, "selector {}, number of files".format(i)) - assert_equal(df[0], i, "selector {}, file selected".format(i)) + assert_equal(len(df), 1, f"selector {i}, number of files") + assert_equal(df[0], i, f"selector {i}, file selected") if periodic and (nfiles != 2): ans_gf = sorted([(i - 1) % nfiles, (i + 1) % nfiles]) elif i == 0: @@ -347,14 +345,10 @@ def test_bitmap_select(): else: ans_gf = [i - 1, i + 1] assert_equal( - len(gf), - len(ans_gf), - "selector {}, number of ghost files".format(i), + len(gf), len(ans_gf), f"selector {i}, number of ghost files", ) for i in range(len(gf)): - assert_equal( - gf[i], ans_gf[i], "selector {}, ghost files".format(i) - ) + assert_equal(gf[i], ans_gf[i], f"selector {i}, ghost files") else: lf_frac = np.floor(float(fr.left_edge[0]) / div) * div @@ -375,7 +369,7 @@ def test_bitmap_select(): if (lf + 0.5) <= (lf_frac - div): lf += 1 df_ans = np.arange(max(lf, 0), min(rf + 1, nfiles)) - assert_array_equal(df, df_ans, "selector {}, file array".format(i)) + assert_array_equal(df, df_ans, f"selector {i}, file array") # Ghost zones selected files lf_ghost = int( np.floor(lf_frac - div) @@ -398,9 +392,7 @@ def test_bitmap_select(): if rf_ghost > rf: gf_ans.append(rf_ghost % nfiles) gf_ans = np.array(sorted(gf_ans)) - assert_array_equal( - gf, gf_ans, "selector {}, ghost file array".format(i) - ) + assert_array_equal(gf, gf_ans, f"selector {i}, ghost file array") def cell_centers(order, left_edge, right_edge): @@ -468,14 +460,14 @@ def makeall_decomp_hilbert_gaussian( np.random.seed(int(0x4D3D3D3)) DW = DRE - DLE if fname_base is None: - fname_base = "hilbert{}_gaussian_np{}_nf{}_".format(order, npart, nfiles) + fname_base = f"hilbert{order}_gaussian_np{npart}_nf{nfiles}_" if width is None: width = 0.1 * DW if center is None: center = DLE + 0.5 * DW def load_pos(file_id): - filename = fname_base + "file{}".format(file_id) + filename = fname_base + f"file{file_id}" if os.path.isfile(filename): fd = open(filename, "rb") positions = pickle.load(fd) @@ -485,7 +477,7 @@ def load_pos(file_id): return positions def save_pos(file_id, positions): - filename = fname_base + "file{}".format(file_id) + filename = fname_base + f"file{file_id}" fd = open(filename, "wb") pickle.dump(positions, fd) fd.close() @@ -678,7 +670,7 @@ def fake_decomp( import pickle if fname is None and distrib == "gaussian": - fname = "{}6_{}_np{}_nf{}_file{}".format(decomp, distrib, npart, nfiles, ifile) + fname = f"{decomp}6_{distrib}_np{npart}_nf{nfiles}_file{ifile}" if fname is not None and os.path.isfile(fname): fd = open(fname, "rb") pos = pickle.load(fd) @@ -734,7 +726,7 @@ def fake_decomp( pos = fake_decomp_random(npart, nfiles, ifile, DLE, DRE, **kws) else: raise ValueError( - "Unsupported value {} for input parameter 'distrib'".format(distrib) + f"Unsupported value {distrib} for input parameter 'distrib'" ) # Each file contains a slab (part of x domain, all of y/z domain) elif decomp == "sliced": @@ -742,7 +734,7 @@ def fake_decomp( pos = fake_decomp_sliced(npart, nfiles, ifile, DLE, DRE, **kws) else: raise ValueError( - "Unsupported value {} for input parameter 'distrib'".format(distrib) + f"Unsupported value {distrib} for input parameter 'distrib'" ) # Particles are assigned to files based on their location on a # Peano-Hilbert curve of order 6 @@ 
-770,7 +762,7 @@ def fake_decomp( ) else: raise ValueError( - "Unsupported value {} for input parameter 'distrib'".format(distrib) + f"Unsupported value {distrib} for input parameter 'distrib'" ) # Particles are assigned to files based on their location on a # Morton ordered Z-curve of order 6 @@ -783,12 +775,10 @@ def fake_decomp( pos = fake_decomp_morton(npart, nfiles, ifile, DLE, DRE, **kws) else: raise ValueError( - "Unsupported value {} for input parameter 'distrib'".format(distrib) + f"Unsupported value {distrib} for input parameter 'distrib'" ) else: - raise ValueError( - "Unsupported value {} for input parameter 'decomp'".format(decomp) - ) + raise ValueError(f"Unsupported value {decomp} for input parameter 'decomp'") # Save if fname is not None: fd = open(fname, "wb") diff --git a/yt/pmods.py b/yt/pmods.py index 8e94ce0d71a..9ee8b551855 100644 --- a/yt/pmods.py +++ b/yt/pmods.py @@ -328,7 +328,7 @@ def __find_head_package__(parent, name): head = name tail = "" if parent: - qname = "%s.%s" % (parent.__name__, head) + qname = f"{parent.__name__}.{head}" else: qname = head q = __import_module__(head, qname, parent) @@ -350,7 +350,7 @@ def __load_tail__(q, tail): if i < 0: i = len(tail) head, tail = tail[:i], tail[i + 1 :] - mname = "%s.%s" % (m.__name__, head) + mname = f"{m.__name__}.{head}" m = __import_module__(head, mname, m) if not m: raise ImportError("No module named " + mname) @@ -369,7 +369,7 @@ def __ensure_fromlist__(m, fromlist, recursive=0): __ensure_fromlist__(m, all, 1) continue if sub != "*" and not hasattr(m, sub): - subname = "%s.%s" % (m.__name__, sub) + subname = f"{m.__name__}.{sub}" submod = __import_module__(sub, subname, m) if not submod: raise ImportError("No module named " + subname) diff --git a/yt/startup_tasks.py b/yt/startup_tasks.py index 6515a9401a1..8e7ab1cdf58 100644 --- a/yt/startup_tasks.py +++ b/yt/startup_tasks.py @@ -92,7 +92,7 @@ def error(self, message): and then exits. 
""" self.print_help(sys.stderr) - self.exit(2, "%s: error: %s\n" % (self.prog, message)) + self.exit(2, f"{self.prog}: error: {message}\n") parser = YTParser(description="yt command line arguments") diff --git a/yt/testing.py b/yt/testing.py index 97cf8656995..75ec9a7969e 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -232,9 +232,9 @@ def fake_random_ds( else: data["io", field] = (prng.random_sample(size=int(particles)), unit) else: - for f in ("particle_position_%s" % ax for ax in "xyz"): + for f in (f"particle_position_{ax}" for ax in "xyz"): data["io", f] = (prng.random_sample(size=particles), "code_length") - for f in ("particle_velocity_%s" % ax for ax in "xyz"): + for f in (f"particle_velocity_{ax}" for ax in "xyz"): data["io", f] = (prng.random_sample(size=particles) - 0.5, "cm/s") data["io", "particle_mass"] = (prng.random_sample(particles), "g") ug = load_uniform_grid( @@ -279,12 +279,12 @@ def fake_amr_ds( for f in fields: gdata[f] = prng.random_sample(dims) if particles: - for i, f in enumerate("particle_position_%s" % ax for ax in "xyz"): + for i, f in enumerate(f"particle_position_{ax}" for ax in "xyz"): pdata = prng.random_sample(particles) pdata /= right_edge[i] - left_edge[i] pdata += left_edge[i] gdata["io", f] = (pdata, "code_length") - for f in ("particle_velocity_%s" % ax for ax in "xyz"): + for f in (f"particle_velocity_{ax}" for ax in "xyz"): gdata["io", f] = (prng.random_sample(particles) - 0.5, "cm/s") gdata["io", "particle_mass"] = (prng.random_sample(particles), "g") data.append(gdata) @@ -860,15 +860,15 @@ def units_override_check(fn): attrs1 = [] attrs2 = [] for u in units_list: - unit_attr = getattr(ds1, "%s_unit" % u, None) + unit_attr = getattr(ds1, f"{u}_unit", None) if unit_attr is not None: attrs1.append(unit_attr) - units_override["%s_unit" % u] = (unit_attr.v, str(unit_attr.units)) + units_override[f"{u}_unit"] = (unit_attr.v, str(unit_attr.units)) del ds1 ds2 = load(fn, units_override=units_override) assert len(ds2.units_override) > 0 for u in units_list: - unit_attr = getattr(ds2, "%s_unit" % u, None) + unit_attr = getattr(ds2, f"{u}_unit", None) if unit_attr is not None: attrs2.append(unit_attr) assert_equal(attrs1, attrs2) @@ -993,7 +993,7 @@ def _func(*args, **kwargs): su = _rv.sum(dtype="float64") si = _rv.size ha = hashlib.md5(_rv.tostring()).hexdigest() - fn = "func_results_ref_%s.cpkl" % (name) + fn = f"func_results_ref_{name}.cpkl" with open(fn, "wb") as f: pickle.dump((mi, ma, st, su, si, ha), f) return rv @@ -1023,13 +1023,13 @@ def _func(*args, **kwargs): _rv.size, hashlib.md5(_rv.tostring()).hexdigest(), ) - fn = "func_results_ref_%s.cpkl" % (name) + fn = f"func_results_ref_{name}.cpkl" if not os.path.exists(fn): print("Answers need to be created with --answer-reference .") return False with open(fn, "rb") as f: ref = pickle.load(f) - print("Sizes: %s (%s, %s)" % (vals[4] == ref[4], vals[4], ref[4])) + print(f"Sizes: {vals[4] == ref[4]} ({vals[4]}, {ref[4]})") assert_allclose(vals[0], ref[0], 1e-8, err_msg="min") assert_allclose(vals[1], ref[1], 1e-8, err_msg="max") assert_allclose(vals[2], ref[2], 1e-8, err_msg="std") @@ -1147,7 +1147,7 @@ def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs): rt = YTArray(rtol) if not rt.units.is_dimensionless: - raise AssertionError("Units of rtol (%s) are not " "dimensionless" % rt.units) + raise AssertionError(f"Units of rtol ({rt.units}) are not dimensionless") if not isinstance(atol, YTArray): at = YTQuantity(atol, des.units) @@ -1230,9 +1230,7 @@ def ffalse(func): # needed since 
None is no longer returned, so we check for the skip # exception in the xfail case for that test def skip(*args, **kwargs): - msg = "`{}` backend not found, skipping: `{}`".format( - backend, func.__name__ - ) + msg = f"`{backend}` backend not found, skipping: `{func.__name__}`" print(msg) pytest.skip(msg) diff --git a/yt/utilities/amr_kdtree/amr_kdtree.py b/yt/utilities/amr_kdtree/amr_kdtree.py index b4407e5dacc..904b605199e 100644 --- a/yt/utilities/amr_kdtree/amr_kdtree.py +++ b/yt/utilities/amr_kdtree/amr_kdtree.py @@ -471,7 +471,7 @@ def store_kd_bricks(self, fn=None): if not self._initialized: self.initialize_source() if fn is None: - fn = "%s_kd_bricks.h5" % self.ds + fn = f"{self.ds}_kd_bricks.h5" if self.comm.rank != 0: self.comm.recv_array(self.comm.rank - 1, tag=self.comm.rank - 1) f = h5py.File(fn, mode="w") @@ -481,7 +481,7 @@ def store_kd_bricks(self, fn=None): for fi, field in enumerate(self.fields): try: f.create_dataset( - "/brick_%s_%s" % (hex(i), field), + f"/brick_{hex(i)}_{field}", data=node.data.my_data[fi].astype("float64"), ) except Exception: @@ -493,7 +493,7 @@ def store_kd_bricks(self, fn=None): def load_kd_bricks(self, fn=None): if fn is None: - fn = "%s_kd_bricks.h5" % self.ds + fn = f"{self.ds}_kd_bricks.h5" if self.comm.rank != 0: self.comm.recv_array(self.comm.rank - 1, tag=self.comm.rank - 1) try: @@ -502,7 +502,7 @@ def load_kd_bricks(self, fn=None): i = node.node_id if node.grid != -1: data = [ - f["brick_%s_%s" % (hex(i), field)][:].astype("float64") + f[f"brick_{hex(i)}_{field}"][:].astype("float64") for field in self.fields ] node.data = PartitionedGrid( @@ -654,4 +654,4 @@ def count_cells(self): print(hv.tree.trunk.kd_sum_volume()) print(hv.tree.trunk.kd_node_check()) - print("Time: %e seconds" % (t2 - t1)) + print(f"Time: {t2 - t1:e} seconds") diff --git a/yt/utilities/answer_testing/answer_tests.py b/yt/utilities/answer_testing/answer_tests.py index 37a6b1bc0bf..48b732d8e7f 100644 --- a/yt/utilities/answer_testing/answer_tests.py +++ b/yt/utilities/answer_testing/answer_tests.py @@ -112,7 +112,7 @@ def pixelized_projection_values(ds, axis, field, weight_field=None, dobj_type=No d = frb.data for f in proj.field_data: # Sometimes f will be a tuple. - d["%s_sum" % (f,)] = proj.field_data[f].sum(dtype="float64") + d[f"{f}_sum"] = proj.field_data[f].sum(dtype="float64") # This is to try and remove python-specific anchors in the yaml # answer file. 
Also, using __repr__() results in weird strings # of strings that make comparison fail even though the data is diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 25d7315ae22..552bcaa1fc1 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -100,7 +100,7 @@ def my_version(self, version=None): try: version = get_yt_version() except Exception: - version = "UNKNOWN%s" % (time.time()) + version = f"UNKNOWN{time.time()}" self._my_version = version return self._my_version @@ -245,7 +245,7 @@ def dump(self, result_storage): for i, ds_name in enumerate(result_storage): pb.update(i) rs = pickle.dumps(result_storage[ds_name]) - object_name = "%s_%s" % (self.answer_name, ds_name) + object_name = f"{self.answer_name}_{ds_name}" if object_name in c.get_object_names(): obj = c.get_object(object_name) c.delete_object(obj) @@ -265,7 +265,7 @@ def dump(self, result_storage): # Store data using shelve ds = shelve.open(self.answer_name, protocol=-1) for ds_name in result_storage: - answer_name = "%s" % ds_name + answer_name = f"{ds_name}" if answer_name in ds: mylog.info("Overwriting %s", answer_name) ds[answer_name] = result_storage[ds_name] @@ -275,7 +275,7 @@ def get(self, ds_name, default=None): if self.reference_name is None: return default # Read data using shelve - answer_name = "%s" % ds_name + answer_name = f"{ds_name}" ds = shelve.open(self.reference_name, protocol=-1) try: result = ds[answer_name] @@ -388,8 +388,8 @@ def __call__(self): # nosetests command line arguments. In this case, set the answer_name # from the `answer_name` keyword in the test case if self.options.answer_name is None: - pyver = "py{}{}".format(sys.version_info.major, sys.version_info.minor) - self.answer_name = "{}_{}".format(pyver, self.answer_name) + pyver = f"py{sys.version_info.major}{sys.version_info.minor}" + self.answer_name = f"{pyver}_{self.answer_name}" answer_store_dir = os.path.realpath(self.options.output_dir) ref_name = os.path.join( @@ -411,7 +411,7 @@ def __call__(self): # Compare test generated values against the golden answer dd = self.reference_storage.get(self.storage_name) if dd is None or self.description not in dd: - raise YTNoOldAnswer("%s : %s" % (self.storage_name, self.description)) + raise YTNoOldAnswer(f"{self.storage_name} : {self.description}") ov = dd[self.description] self.compare(nv, ov) else: @@ -422,7 +422,7 @@ def __call__(self): @property def storage_name(self): if self.prefix != "": - return "%s_%s" % (self.prefix, self.ds) + return f"{self.prefix}_{self.ds}" return str(self.ds) def compare(self, new_result, old_result): @@ -502,7 +502,7 @@ def run(self): return [avg, mi, ma] def compare(self, new_result, old_result): - err_msg = "Field values for %s not equal." % (self.field,) + err_msg = f"Field values for {self.field} not equal." if hasattr(new_result, "d"): new_result = new_result.d if hasattr(old_result, "d"): @@ -541,7 +541,7 @@ def run(self): return obj[self.field] def compare(self, new_result, old_result): - err_msg = "All field values for %s not equal." % self.field + err_msg = f"All field values for {self.field} not equal." if hasattr(new_result, "d"): new_result = new_result.d if hasattr(old_result, "d"): @@ -651,7 +651,7 @@ def run(self): d = frb.data for f in proj.field_data: # Sometimes f will be a tuple. 
- d["%s_sum" % (f,)] = proj.field_data[f].sum(dtype="float64") + d[f"{f}_sum"] = proj.field_data[f].sum(dtype="float64") return d def compare(self, new_result, old_result): @@ -795,7 +795,7 @@ def compare_image_lists(new_result, old_result, decimals): if line.endswith(".png") ] for fn in tempfiles: - sys.stderr.write("\n[[ATTACHMENT|{}]]".format(fn)) + sys.stderr.write(f"\n[[ATTACHMENT|{fn}]]") sys.stderr.write("\n") assert_equal(results, None, results) for fn in fns: @@ -1055,8 +1055,8 @@ def run(self): # Wipe out invalid values (fillers) pix_x[~np.isfinite(pix_x)] = 0.0 pix_y[~np.isfinite(pix_y)] = 0.0 - rv["%s_x" % axis] = pix_x - rv["%s_y" % axis] = pix_y + rv[f"{axis}_x"] = pix_x + rv[f"{axis}_y"] = pix_y return rv def compare(self, new_result, old_result): diff --git a/yt/utilities/answer_testing/level_sets_tests.py b/yt/utilities/answer_testing/level_sets_tests.py index 00dcdf744aa..5d12f2b4631 100644 --- a/yt/utilities/answer_testing/level_sets_tests.py +++ b/yt/utilities/answer_testing/level_sets_tests.py @@ -33,5 +33,5 @@ def run(self): return result def compare(self, new_result, old_result): - err_msg = "Size and/or mass of connected sets do not agree for %s." % self.ds_fn + err_msg = f"Size and/or mass of connected sets do not agree for {self.ds_fn}." assert_equal(new_result, old_result, err_msg=err_msg, verbose=True) diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index cbb6979b5bc..7109783ba0b 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -358,7 +358,7 @@ def requires_ds(ds_fn, file_check=False): def ffalse(func): @functools.wraps(func) def skip(*args, **kwargs): - msg = "{} not found, skipping {}.".format(ds_fn, func.__name__) + msg = f"{ds_fn} not found, skipping {func.__name__}." pytest.fail(msg) return skip @@ -385,7 +385,7 @@ def requires_sim(sim_fn, sim_type, file_check=False): def ffalse(func): @functools.wraps(func) def skip(*args, **kwargs): - msg = "{} not found, skipping {}.".format(sim_fn, func.__name__) + msg = f"{sim_fn} not found, skipping {func.__name__}." 
pytest.fail(msg) return skip diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 27fcb2e8079..565454264fc 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -54,10 +54,10 @@ def _fix_ds(arg, *args, **kwargs): - if os.path.isdir("%s" % arg) and os.path.exists("%s/%s" % (arg, arg)): - ds = load("%s/%s" % (arg, arg), *args, **kwargs) - elif os.path.isdir("%s.dir" % arg) and os.path.exists("%s.dir/%s" % (arg, arg)): - ds = load("%s.dir/%s" % (arg, arg), *args, **kwargs) + if os.path.isdir(f"{arg}") and os.path.exists(f"{arg}/{arg}"): + ds = load(f"{arg}/{arg}", *args, **kwargs) + elif os.path.isdir(f"{arg}.dir") and os.path.exists(f"{arg}.dir/{arg}"): + ds = load(f"{arg}.dir/{arg}", *args, **kwargs) elif arg.endswith(".index"): ds = load(arg[:-10], *args, **kwargs) else: @@ -110,20 +110,20 @@ def _print_installation_information(path): print() print("yt module located at:") - print(" %s" % (path)) + print(f" {path}") if "YT_DEST" in os.environ: spath = os.path.join(os.environ["YT_DEST"], "src", "yt-supplemental") if os.path.isdir(spath): print("The supplemental repositories are located at:") - print(" %s" % (spath)) + print(f" {spath}") print() print("The current version of yt is:") print() print("---") - print("Version = %s" % yt.__version__) + print(f"Version = {yt.__version__}") vstring = get_hg_or_git_version(path) if vstring is not None: - print("Changeset = %s" % vstring.strip()) + print(f"Changeset = {vstring.strip()}") print("---") return vstring @@ -647,7 +647,7 @@ def __call__(self, parser, namespace, values, option_string=None): # This code snippet is modified from Georg Brandl def bb_apicall(endpoint, data, use_pass=True): - uri = "https://api.bitbucket.org/1.0/%s/" % endpoint + uri = f"https://api.bitbucket.org/1.0/{endpoint}/" # since bitbucket doesn't return the required WWW-Authenticate header when # making a request without Authorization, we cannot use the standard urllib2 # auth handlers; we have to add the requisite header from the start @@ -657,8 +657,8 @@ def bb_apicall(endpoint, data, use_pass=True): if use_pass: username = input("Bitbucket Username? 
") password = getpass.getpass() - upw = "%s:%s" % (username, password) - req.add_header("Authorization", "Basic %s" % base64.b64encode(upw).strip()) + upw = f"{username}:{password}" + req.add_header("Authorization", f"Basic {base64.b64encode(upw).strip()}") return urllib.request.urlopen(req).read() @@ -717,12 +717,12 @@ def __call__(self, args): print("the pastebin and then include the link in this bugreport.") if "EDITOR" in os.environ: print() - print("Press enter to spawn your editor, %s" % os.environ["EDITOR"]) + print(f"Press enter to spawn your editor, {os.environ['EDITOR']}") input() tf = tempfile.NamedTemporaryFile(delete=False) fn = tf.name tf.close() - subprocess.call("$EDITOR %s" % fn, shell=True) + subprocess.call(f"$EDITOR {fn}", shell=True) content = open(fn).read() try: os.unlink(fn) @@ -744,7 +744,7 @@ def __call__(self, args): break lines.append(line) content = "\n".join(lines) - content = "Reporting Version: %s\n\n%s" % (current_version, content) + content = f"Reporting Version: {current_version}\n\n{content}" endpoint = "repositories/yt_analysis/yt/issues" data["content"] = content print() @@ -752,7 +752,7 @@ def __call__(self, args): print() print("Okay, we're going to submit with this:") print() - print("Summary: %s" % (data["title"])) + print(f"Summary: {data['title']}") print() print("---") print(content) @@ -769,13 +769,13 @@ def __call__(self, args): import json retval = json.loads(retval) - url = "http://bitbucket.org/yt_analysis/yt/issue/%s" % retval["local_id"] + url = f"http://bitbucket.org/yt_analysis/yt/issue/{retval['local_id']}" print() print("===============================================================") print() print("Thanks for your bug report! Together we'll make yt totally bug free!") print("You can view bug report here:") - print(" %s" % url) + print(f" {url}") print() print("Keep in touch!") print() @@ -795,7 +795,7 @@ def __call__(self, args): raise YTCommandRequiresModule("requests") if ytcfg.get("yt", "hub_api_key") != "": print("You seem to already have an API key for the hub in") - print("{} . Delete this if you want to force a".format(CURRENT_CONFIG_FILE)) + print(f"{CURRENT_CONFIG_FILE} . Delete this if you want to force a") print("new user registration.") sys.exit() print("Awesome! Let's start by registering a new user for you.") @@ -838,7 +838,7 @@ def __call__(self, args): print() print() print("Okay, press enter to register. You should receive a welcome") - print("message at %s when this is complete." % email) + print(f"message at {email} when this is complete.") print() input() @@ -1165,7 +1165,7 @@ def __call__(self, args): # TODO: should happen server-side _id = gc._checkResourcePath(args.folderId) - resp = gc.post("/notebook/{}".format(_id)) + resp = gc.post(f"/notebook/{_id}") try: print("Launched! 
Please visit this URL:") print(" https://tmpnb.hub.yt" + resp["url"]) @@ -1186,16 +1186,16 @@ class YTNotebookUploadCmd(YTCommand): def __call__(self, args): gc = _get_girder_client() username = gc.get("/user/me")["login"] - gc.upload(args.file, "/user/{}/Public".format(username)) + gc.upload(args.file, f"/user/{username}/Public") - _id = gc.resourceLookup("/user/{}/Public/{}".format(username, args.file))["_id"] + _id = gc.resourceLookup(f"/user/{username}/Public/{args.file}")["_id"] _fid = next(gc.listFile(_id))["_id"] hub_url = urlparse(ytcfg.get("yt", "hub_url")) print("Upload successful!") print() print("To access your raw notebook go here:") print() - print(" {}://{}/#item/{}".format(hub_url.scheme, hub_url.netloc, _id)) + print(f" {hub_url.scheme}://{hub_url.netloc}/#item/{_id}") print() print("To view your notebook go here:") print() @@ -1305,7 +1305,7 @@ def __call__(self, args): if args.zlim: plt.set_zlim(args.field, *args.zlim) ensure_dir_exists(args.output) - plt.save(os.path.join(args.output, "%s" % (ds))) + plt.save(os.path.join(args.output, f"{ds}")) class YTRPDBCmd(YTCommand): @@ -1391,7 +1391,7 @@ def __call__(self, args): print("place a line like this inside the [yt] section in your") print("yt configuration file at ~/.config/yt/ytrc") print() - print("notebook_password = %s" % pw) + print(f"notebook_password = {pw}") print() elif args.no_password: pw = None @@ -1408,10 +1408,10 @@ def __call__(self, args): print() print("The notebook is now live at:") print() - print(" http://127.0.0.1:%s/" % app.port) + print(f" http://127.0.0.1:{app.port}/") print() print("Recall you can create a new SSH tunnel dynamically by pressing") - print("~C and then typing -L%s:localhost:%s" % (app.port, app.port)) + print(f"~C and then typing -L{app.port}:localhost:{app.port}") print("where the first number is the port on your local machine. ") print() print( @@ -1519,9 +1519,7 @@ class YTDeleteImageCmd(YTCommand): name = "delete_image" def __call__(self, args): - headers = { - "Authorization": "Client-ID {}".format(ytcfg.get("yt", "imagebin_api_key")) - } + headers = {"Authorization": f"Client-ID {ytcfg.get('yt', 'imagebin_api_key')}"} delete_url = ytcfg.get("yt", "imagebin_delete_url") req = urllib.request.Request( @@ -1558,16 +1556,14 @@ def __call__(self, args): if not filename.endswith(".png"): print("File must be a PNG file!") return 1 - headers = { - "Authorization": "Client-ID {}".format(ytcfg.get("yt", "imagebin_api_key")) - } + headers = {"Authorization": f"Client-ID {ytcfg.get('yt', 'imagebin_api_key')}"} image_data = base64.b64encode(open(filename, "rb").read()) parameters = { "image": image_data, type: "base64", "name": filename, - "title": "%s uploaded by yt" % filename, + "title": f"{filename} uploaded by yt", } data = urllib.parse.urlencode(parameters).encode("utf-8") req = urllib.request.Request( @@ -1582,10 +1578,10 @@ def __call__(self, args): if "data" in rv and "link" in rv["data"]: print() print("Image successfully uploaded! 
You can find it at:") - print(" %s" % (rv["data"]["link"])) + print(f" {rv['data']['link']}") print() print("If you'd like to delete it, use the following") - print(" yt delete_image %s" % rv["data"]["deletehash"]) + print(f" yt delete_image {rv['data']['deletehash']}") print() else: print() @@ -1745,7 +1741,7 @@ def __call__(self, args): records.append(record) with open(args.output, "w") as f: json.dump(records, f, indent=4) - print("Identified %s records output to %s" % (len(records), args.output)) + print(f"Identified {len(records)} records output to {args.output}") class YTDownloadData(YTCommand): @@ -1806,25 +1802,25 @@ def __call__(self, args): raise RuntimeError( "You need to specify download location. See " "--help for details." ) - data_url = "http://yt-project.org/data/%s" % args.filename + data_url = f"http://yt-project.org/data/{args.filename}" if args.location in ["test_data_dir", "supp_data_dir"]: data_dir = ytcfg.get("yt", args.location) if data_dir == "/does/not/exist": - raise RuntimeError("'%s' is not configured!" % args.location) + raise RuntimeError(f"'{args.location}' is not configured!") else: data_dir = args.location if not os.path.exists(data_dir): - print("The directory '%s' does not exist. Creating..." % data_dir) + print(f"The directory '{data_dir}' does not exist. Creating...") ensure_dir(data_dir) data_file = os.path.join(data_dir, args.filename) if os.path.exists(data_file) and not args.overwrite: - raise IOError("File '%s' exists and overwrite=False!" % data_file) - print("Attempting to download file: %s" % args.filename) + raise IOError(f"File '{data_file}' exists and overwrite=False!") + print(f"Attempting to download file: {args.filename}") fn = download_file(data_url, data_file) if not os.path.exists(fn): - raise IOError("The file '%s' did not download!!" 
% args.filename) - print("File: %s downloaded successfully to %s" % (args.filename, data_file)) + raise IOError(f"The file '{args.filename}' did not download!!") + print(f"File: {args.filename} downloaded successfully to {data_file}") def get_list(self): data = ( diff --git a/yt/utilities/configure.py b/yt/utilities/configure.py index 6ce795d02d0..7a138107aac 100644 --- a/yt/utilities/configure.py +++ b/yt/utilities/configure.py @@ -34,21 +34,21 @@ def migrate_config(): print("Old config not found.") sys.exit() CONFIG.read(_OLD_CONFIG_FILE) - print("Writing a new config file to: {}".format(CURRENT_CONFIG_FILE)) + print(f"Writing a new config file to: {CURRENT_CONFIG_FILE}") write_config() - print("Backing up the old config file: {}.bak".format(_OLD_CONFIG_FILE)) + print(f"Backing up the old config file: {_OLD_CONFIG_FILE}.bak") os.rename(_OLD_CONFIG_FILE, _OLD_CONFIG_FILE + ".bak") old_config_dir = os.path.dirname(_OLD_CONFIG_FILE) try: plugin_file = CONFIG.get("yt", "pluginfilename") if plugin_file and os.path.exists(os.path.join(old_config_dir, plugin_file)): - print("Migrating plugin file {} to new location".format(plugin_file)) + print(f"Migrating plugin file {plugin_file} to new location") shutil.copyfile( os.path.join(old_config_dir, plugin_file), os.path.join(os.path.dirname(CURRENT_CONFIG_FILE), plugin_file), ) - print("Backing up the old plugin file: {}.bak".format(_OLD_CONFIG_FILE)) + print(f"Backing up the old plugin file: {_OLD_CONFIG_FILE}.bak") plugin_file = os.path.join(old_config_dir, plugin_file) os.rename(plugin_file, plugin_file + ".bak") except configparser.NoOptionError: diff --git a/yt/utilities/cosmology.py b/yt/utilities/cosmology.py index 3bccef03685..1071804bb05 100644 --- a/yt/utilities/cosmology.py +++ b/yt/utilities/cosmology.py @@ -88,7 +88,7 @@ def __init__( unit_registry = UnitRegistry(unit_system=unit_system) unit_registry.add("h", hubble_constant, dimensions.dimensionless, r"h") for my_unit in ["m", "pc", "AU", "au"]: - new_unit = "%scm" % my_unit + new_unit = f"{my_unit}cm" my_u = Unit(my_unit, registry=unit_registry) # technically not true, but distances here are actually comoving unit_registry.add( diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 1f1972f3d87..fb5b98b4e95 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -20,11 +20,11 @@ def __init__(self, filename, args=None, kwargs=None): self.kwargs = kwargs def __str__(self): - msg = "Could not determine input format from %s" % self.filename + msg = f"Could not determine input format from {self.filename}" if self.args is not None: - msg += ", %s" % self.args + msg += f", {self.args}" if self.kwargs is not None: - msg += ", %s" % self.kwargs + msg += f", {self.kwargs}" msg += "." return msg @@ -59,7 +59,7 @@ def __init__(self, axes): self.axes = axes def __str__(self): - return "The supplied axes are not orthogonal. %s" % (self.axes) + return f"The supplied axes are not orthogonal. {self.axes}" class YTNoDataInObjectError(YTException): @@ -79,7 +79,7 @@ def __init__(self, field, ds): self.ds = ds def __str__(self): - return "Could not find field %s in %s." % (self.field, self.ds) + return f"Could not find field {self.field} in {self.ds}." class YTParticleTypeNotFound(YTException): @@ -88,7 +88,7 @@ def __init__(self, fname, ds): self.ds = ds def __str__(self): - return "Could not find particle_type '%s' in %s." % (self.fname, self.ds) + return f"Could not find particle_type '{self.fname}' in {self.ds}."
class YTSceneFieldNotFound(YTException): @@ -97,7 +97,7 @@ class YTSceneFieldNotFound(YTException): class YTCouldNotGenerateField(YTFieldNotFound): def __str__(self): - return "Could field '%s' in %s could not be generated." % (self.fname, self.ds) + return f"Could field '{self.fname}' in {self.ds} could not be generated." class YTFieldTypeNotFound(YTException): @@ -113,7 +113,7 @@ def __str__(self): + "Try adding this field with particle_type=True." ) % self.ftype else: - return "Could not find field type '%s'." % (self.ftype) + return f"Could not find field type '{self.ftype}'." class YTSimulationNotIdentified(YTException): @@ -122,7 +122,7 @@ def __init__(self, sim_type): self.sim_type = sim_type def __str__(self): - return "Simulation time-series type %s not defined." % self.sim_type + return f"Simulation time-series type {self.sim_type} not defined." class YTCannotParseFieldDisplayName(YTException): @@ -167,7 +167,7 @@ def __init__(self, ds, parameter): self.parameter = parameter def __str__(self): - return "dataset %s is missing %s parameter." % (self.ds, self.parameter) + return f"dataset {self.ds} is missing {self.parameter} parameter." class NoStoppingCondition(YTException): @@ -191,7 +191,7 @@ def __init__(self, geom): self.geom = geom def __str__(self): - return "We don't currently support %s geometry" % self.geom + return f"We don't currently support {self.geom} geometry" class YTCoordinateNotImplemented(YTException): @@ -208,7 +208,7 @@ def __init__(self, unit): self.unit = unit def __str__(self): - return "This dataset doesn't recognize %s" % self.unit + return f"This dataset doesn't recognize {self.unit}" class YTFieldUnitError(YTException): @@ -258,9 +258,7 @@ def __init__(self, pattern): self.pattern = pattern def __str__(self): - return "No filenames were found to match the pattern: " + "'%s'" % ( - self.pattern - ) + return "No filenames were found to match the pattern: " + f"'{self.pattern}'" class YTNoOldAnswer(YTException): @@ -315,7 +313,7 @@ def __init__(self, filename): def __str__(self): return ( "Enzo test output file (OutputLog) not generated for: " - + "'%s'" % (self.testname) + + f"'{self.testname}'" + ".\nTest did not complete." ) @@ -338,16 +336,14 @@ def __init__(self, nv, fn): self.fn = fn def __str__(self): - s = "There are too many vertices (%s) to upload to Sketchfab. " % (self.nv) - s += "Your model has been saved as %s . You should upload manually." % ( - self.fn - ) + s = f"There are too many vertices ({self.nv}) to upload to Sketchfab. " + s += f"Your model has been saved as {self.fn} . You should upload manually." return s class YTInvalidWidthError(YTException): def __init__(self, width): - self.error = "width (%s) is invalid" % str(width) + self.error = f"width ({str(width)}) is invalid" def __str__(self): return str(self.error) @@ -358,7 +354,7 @@ def __init__(self, field): self.field = field def __str__(self): - return "Cannot identify field %s" % (self.field,) + return f"Cannot identify field {self.field}" class YTDataSelectorNotImplemented(YTException): @@ -366,7 +362,7 @@ def __init__(self, class_name): self.class_name = class_name def __str__(self): - return "Data selector '%s' not implemented." % (self.class_name) + return f"Data selector '{self.class_name}' not implemented." class YTParticleDepositionNotImplemented(YTException): @@ -374,7 +370,7 @@ def __init__(self, class_name): self.class_name = class_name def __str__(self): - return "Particle deposition method '%s' not implemented." 
% (self.class_name) + return f"Particle deposition method '{self.class_name}' not implemented." class YTDomainOverflow(YTException): @@ -399,7 +395,7 @@ def __init__(self, dims, dd): self.dd = dd def __str__(self): - return "Integer domain overflow: %s in %s" % (self.dims, self.dd) + return f"Integer domain overflow: {self.dims} in {self.dd}" class YTIllDefinedFilter(YTException): @@ -509,14 +505,10 @@ def __init__(self, field, new_spec, old_spec): self.old_spec = old_spec def __str__(self): - r = """Field %s already exists with field spec: - %s + r = f"""Field {self.field} already exists with field spec: + {self.old_spec} But being asked to add it with: - %s""" % ( - self.field, - self.old_spec, - self.new_spec, - ) + {self.new_spec}""" return r @@ -526,11 +518,8 @@ def __init__(self, shape, dimensions): self.dimensions = dimensions def __str__(self): - r = """Position arrays must be length and shape (N,3). - But this one has %s and %s.""" % ( - self.dimensions, - self.shape, - ) + r = f"""Position arrays must be length and shape (N,3). + But this one has {self.dimensions} and {self.shape}.""" return r @@ -552,11 +541,9 @@ def __init__(self, conditions, field): self.field = field def __str__(self): - r = """Can't mix particle/discrete and fluid/mesh conditions or - quantities. Field: %s and Conditions specified: - """ % ( - self.field, - ) + r = f"""Can't mix particle/discrete and fluid/mesh conditions or + quantities. Field: {self.field} and Conditions specified: + """ r += "\n".join([c for c in self.conditions]) return r @@ -566,7 +553,7 @@ def __init__(self, filename): self.filename = filename def __str__(self): - return "A file already exists at %s and overwrite=False." % self.filename + return f"A file already exists at {self.filename} and overwrite=False." class YTNonIndexedDataContainer(YTException): @@ -630,10 +617,7 @@ def __init__(self, wrong, right): self.right = right def __str__(self): - return "Dimensionality specified was %s but we need %s" % ( - self.wrong, - self.right, - ) + return f"Dimensionality specified was {self.wrong} but we need {self.right}" class YTInvalidShaderType(YTException): @@ -641,7 +625,7 @@ def __init__(self, source): self.source = source def __str__(self): - return "Can't identify shader_type for file '%s.'" % (self.source) + return f"Can't identify shader_type for file '{self.source}.'" class YTInvalidFieldType(YTException): @@ -667,7 +651,7 @@ def __init__(self, kind): self.kind = kind def __str__(self): - return "Can't determine kind specification for %s" % (self.kind) + return f"Can't determine kind specification for {self.kind}" class YTUnknownUniformSize(YTException): @@ -675,7 +659,7 @@ def __init__(self, size_spec): self.size_spec = size_spec def __str__(self): - return "Can't determine size specification for %s" % (self.size_spec) + return f"Can't determine size specification for {self.size_spec}" class YTDataTypeUnsupported(YTException): @@ -684,8 +668,8 @@ def __init__(self, this, supported): self.this = this def __str__(self): - v = "This operation is not supported for data of geometry %s; " % self.this - v += "It supports data of geometries %s" % (self.supported,) + v = f"This operation is not supported for data of geometry {self.this}; " + v += f"It supports data of geometries {self.supported}" return v @@ -697,7 +681,7 @@ def __init__(self, message, bounds): def __str__(self): v = "This operation has encountered a bounds error: " v += self.message - v += " Specified bounds are '%s'." 
% (self.bounds,) + v += f" Specified bounds are '{self.bounds}'." return v @@ -763,7 +747,7 @@ def __init__(self, bad_object): self.bad_object = bad_object def __str__(self): - v = "Supplied:\n%s\nto a boolean operation" % (self.bad_object) + v = f"Supplied:\n{self.bad_object}\nto a boolean operation" v += " but it is not a YTSelectionContainer3D object." return v @@ -804,7 +788,7 @@ def __init__(self, shapes): def __str__(self): msg = "Not all grid-based fields have the same shape!\n" for name, shape in self.shapes: - msg += " Field {} has shape {}.\n".format(name, shape) + msg += f" Field {name} has shape {shape}.\n" return msg @@ -819,7 +803,7 @@ def __str__(self): ) for name, shape in self.shapes: field = (self.ptype, name) - msg += " Field {} has shape {}.\n".format(field, shape) + msg += f" Field {field} has shape {shape}.\n" return msg @@ -830,11 +814,11 @@ def __init__(self, shapes, grid_dims): def __str__(self): msg = "Not all grid-based fields match the grid dimensions! " - msg += "Grid dims are {}, ".format(self.grid_dims) + msg += f"Grid dims are {self.grid_dims}, " msg += "and the following fields have shapes that do not match them:\n" for name, shape in self.shapes: if shape != self.grid_dims: - msg += " Field {} has shape {}.\n".format(name, shape) + msg += f" Field {name} has shape {shape}.\n" return msg @@ -843,22 +827,22 @@ def __init__(self, module): self.module = module def __str__(self): - msg = 'This command requires "%s" to be installed.\n\n' % self.module - msg += 'Please install "%s" with the package manager ' % self.module + msg = f'This command requires "{self.module}" to be installed.\n\n' + msg += f'Please install "{self.module}" with the package manager ' msg += "appropriate for your python environment, e.g.:\n" - msg += " conda install %s\n" % self.module + msg += f" conda install {self.module}\n" msg += "or:\n" - msg += " pip install %s\n" % self.module + msg += f" pip install {self.module}\n" return msg class YTModuleRemoved(Exception): def __init__(self, name, new_home=None, info=None): - message = "The %s module has been removed from yt." % name + message = f"The {name} module has been removed from yt." if new_home is not None: - message += "\nIt has been moved to %s." % new_home + message += f"\nIt has been moved to {new_home}." if info is not None: - message += "\nFor more information, see %s." % info + message += f"\nFor more information, see {info}." Exception.__init__(self, message) @@ -868,7 +852,7 @@ def __init__(self, size, max_size): self.max_size = max_size def __str__(self): - msg = "The requested array is of size %s.\n" % self.size + msg = f"The requested array is of size {self.size}.\n" msg += "We do not support displaying arrays larger\n" - msg += "than size %s." % self.max_size + msg += f"than size {self.max_size}." 
return msg diff --git a/yt/utilities/flagging_methods.py b/yt/utilities/flagging_methods.py index 01743a5a72e..3240d30d1c7 100644 --- a/yt/utilities/flagging_methods.py +++ b/yt/utilities/flagging_methods.py @@ -168,4 +168,4 @@ def find_by_second_derivative(self): return [psg1, psg2] def __str__(self): - return "LI: (%s) DIMS: (%s)" % (self.left_index, self.dimensions) + return f"LI: ({self.left_index}) DIMS: ({self.dimensions})" diff --git a/yt/utilities/fortran_utils.py b/yt/utilities/fortran_utils.py index 2b3325544f9..bfc44c7aba3 100644 --- a/yt/utilities/fortran_utils.py +++ b/yt/utilities/fortran_utils.py @@ -186,10 +186,10 @@ def read_vector(f, d, endian="="): >>> f = open("fort.3", "rb") >>> rv = read_vector(f, 'd') """ - pad_fmt = "%sI" % (endian) + pad_fmt = f"{endian}I" pad_size = struct.calcsize(pad_fmt) vec_len = struct.unpack(pad_fmt, f.read(pad_size))[0] # bytes - vec_fmt = "%s%s" % (endian, d) + vec_fmt = f"{endian}{d}" vec_size = struct.calcsize(vec_fmt) if vec_len % vec_size != 0: raise IOError( @@ -317,7 +317,7 @@ def read_record(f, rspec, endian="="): net_format = endian + "I" for _a, n, t in rspec: t = t if len(t) == 1 else t[-1] - net_format += "%s%s" % (n, t) + net_format += f"{n}{t}" net_format += "I" size = struct.calcsize(net_format) vals = list(struct.unpack(net_format, f.read(size))) diff --git a/yt/utilities/grid_data_format/conversion/conversion_athena.py b/yt/utilities/grid_data_format/conversion/conversion_athena.py index 44f4c30701d..7404b78dd51 100644 --- a/yt/utilities/grid_data_format/conversion/conversion_athena.py +++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py @@ -105,7 +105,7 @@ def read_and_write_index(self, basename, ddn, gdf_name): + ".vtk" ) - print("Reading file %s" % fn) + print(f"Reading file {fn}") f = open(fn, "rb") grid = {} grid["read_field"] = None diff --git a/yt/utilities/lib/cykdtree/tests/__init__.py b/yt/utilities/lib/cykdtree/tests/__init__.py index 43868d15c4b..553bd2986af 100644 --- a/yt/utilities/lib/cykdtree/tests/__init__.py +++ b/yt/utilities/lib/cykdtree/tests/__init__.py @@ -28,11 +28,10 @@ def assert_less_equal(x, y): if not size_match: raise AssertionError( "Shape mismatch\n\n" - + "x.shape: %s\ny.shape: %s\n" % (str(x.shape), str(y.shape)) + + f"x.shape: {str(x.shape)}\ny.shape: {str(y.shape)}\n" ) raise AssertionError( - "Variables are not less-equal ordered\n\n" - + "x: %s\ny: %s\n" % (str(x), str(y)) + "Variables are not less-equal ordered\n\n" + f"x: {str(x)}\ny: {str(y)}\n" ) @@ -51,11 +50,10 @@ def call_subprocess(np, func, args, kwargs): str(np), sys.executable, "-c", - "'from %s import %s; %s(%s)'" - % (func.__module__, func.__name__, func.__name__, args_str), + f"'from {func.__module__} import {func.__name__}; {func.__name__}({args_str})'", ] cmd = " ".join(cmd) - print("Running the following command:\n%s" % cmd) + print(f"Running the following command:\n{cmd}") p = Popen(cmd, stdin=PIPE, stdout=PIPE, stderr=PIPE, shell=True) output, err = p.communicate() exit_code = p.returncode @@ -92,7 +90,7 @@ def wrapped(*args, **kwargs): wrapped.__name__ = func.__name__ for k, v in kwargs0.items(): - wrapped.__name__ += "_{}{}".format(k, v) + wrapped.__name__ += f"_{k}{v}" return wrapped def func_param(*args, **kwargs): @@ -184,7 +182,7 @@ def make_points(npts, ndim, leafsize=10, distrib="rand", seed=100): ) np.clip(pts, LE, RE) else: - raise ValueError("Invalid 'distrib': {}".format(distrib)) + raise ValueError(f"Invalid 'distrib': {distrib}") return pts, left_edge, right_edge, leafsize @@ -229,7 +227,7 
@@ def run_test( if nproc > 1: kwargs["suppress_final_output"] = suppress_final_output if profile: - kwargs["profile"] = "{}_mpi_profile.dat".format(unique_str) + kwargs["profile"] = f"{unique_str}_mpi_profile.dat" # Run if profile: pr = cProfile.Profile() @@ -251,10 +249,10 @@ def run_test( ps.add(kwargs["profile"]) if isinstance(profile, str): ps.dump_stats(profile) - print("Stats saved to {}".format(profile)) + print(f"Stats saved to {profile}") else: sort_key = "tottime" ps.sort_stats(sort_key).print_stats(25) # ps.sort_stats(sort_key).print_callers(5) - print("{} s according to 'time'".format(t1 - t0)) + print(f"{t1 - t0} s according to 'time'") return ps diff --git a/yt/utilities/lib/cykdtree/tests/scaling.py b/yt/utilities/lib/cykdtree/tests/scaling.py index c6dd20cafc6..1cec264298d 100644 --- a/yt/utilities/lib/cykdtree/tests/scaling.py +++ b/yt/utilities/lib/cykdtree/tests/scaling.py @@ -41,15 +41,13 @@ def stats_run( perstr = "_periodic" if suppress_final_output: outstr = "_noout" - fname_stat = "stat_{}part_{}proc_{}dim{}{}.txt".format( - npart, nproc, ndim, perstr, outstr - ) + fname_stat = f"stat_{npart}part_{nproc}proc_{ndim}dim{perstr}{outstr}.txt" if overwrite or not os.path.isfile(fname_stat): cProfile.run( "from yt.utilities.lib.cykdtree.tests import run_test; " - + "run_test({}, {}, nproc={}, ".format(npart, ndim, nproc) - + "periodic={}, ".format(periodic) - + "suppress_final_output={})".format(suppress_final_output), + + f"run_test({npart}, {ndim}, nproc={nproc}, " + + f"periodic={periodic}, " + + f"suppress_final_output={suppress_final_output})", fname_stat, ) if display: @@ -147,7 +145,7 @@ def strong_scaling( leafsize=leafsize, suppress_final_output=suppress_final_output, ) - print("Finished {}D on {}.".format(ndim, nproc)) + print(f"Finished {ndim}D on {nproc}.") fig, axs = plt.subplots(1, 1) for i in range(len(ndim_list)): ndim = ndim_list[i] @@ -157,7 +155,7 @@ def strong_scaling( times[:, i, 0], yerr=times[:, i, 1], fmt=clr, - label="ndim = {}".format(ndim), + label=f"ndim = {ndim}", ) axs.set_xlabel("# of Processors") axs.set_ylabel("Time (s)") @@ -229,7 +227,7 @@ def weak_scaling( times[:, i, 0], yerr=times[:, i, 1], fmt=clr, - label="ndim = {}".format(ndim), + label=f"ndim = {ndim}", ) axs.set_xlabel("# of Processors") axs.set_ylabel("Time (s)") diff --git a/yt/utilities/lib/cykdtree/tests/test_kdtree.py b/yt/utilities/lib/cykdtree/tests/test_kdtree.py index c66a6e70a16..ef03954cd09 100644 --- a/yt/utilities/lib/cykdtree/tests/test_kdtree.py +++ b/yt/utilities/lib/cykdtree/tests/test_kdtree.py @@ -96,7 +96,7 @@ def time_tree_construction(Ntime, LStime, ndim=2): t0 = time.time() cykdtree.PyKDTree(pts, le, re, leafsize=LStime) t1 = time.time() - print("{} {}D points, leafsize {}: took {} s".format(Ntime, ndim, LStime, t1 - t0)) + print(f"{Ntime} {ndim}D points, leafsize {LStime}: took {t1 - t0} s") def time_neighbor_search(Ntime, LStime, ndim=2): @@ -105,7 +105,7 @@ def time_neighbor_search(Ntime, LStime, ndim=2): t0 = time.time() tree.get_neighbor_ids(0.5 * np.ones(tree.ndim, "double")) t1 = time.time() - print("{} {}D points, leafsize {}: took {} s".format(Ntime, ndim, LStime, t1 - t0)) + print(f"{Ntime} {ndim}D points, leafsize {LStime}: took {t1 - t0} s") def test_save_load(): diff --git a/yt/utilities/lib/tests/test_geometry_utils.py b/yt/utilities/lib/tests/test_geometry_utils.py index 14011ba55aa..bcf2a73fbd4 100644 --- a/yt/utilities/lib/tests/test_geometry_utils.py +++ b/yt/utilities/lib/tests/test_geometry_utils.py @@ -905,18 +905,14 @@ def 
test_get_morton_neighbors(): np.array([mi[i]], dtype=np.uint64), order=order, periodic=False ) ans = get_morton_indices(np.vstack([p[i, :], pn_non[i]])) - assert_array_equal( - np.unique(out), np.unique(ans), err_msg="Non-periodic: {}".format(i) - ) + assert_array_equal(np.unique(out), np.unique(ans), err_msg=f"Non-periodic: {i}") # Periodic for i in range(N): out = get_morton_neighbors( np.array([mi[i]], dtype=np.uint64), order=order, periodic=True ) ans = get_morton_indices(np.vstack([p[i, :], pn_per[i]])) - assert_array_equal( - np.unique(out), np.unique(ans), err_msg="Periodic: {}".format(i) - ) + assert_array_equal(np.unique(out), np.unique(ans), err_msg=f"Periodic: {i}") def test_dist(): diff --git a/yt/utilities/load_sample.py b/yt/utilities/load_sample.py index 6f2f7c856dc..90b8b7f3cde 100644 --- a/yt/utilities/load_sample.py +++ b/yt/utilities/load_sample.py @@ -82,10 +82,10 @@ def load_sample(name=None, specific_file=None, pbar=True): if specific_file is None: # right now work on loading only untarred files. build out h5 later mylog.info("Default to loading %s for %s dataset", file_lookup, name) - loaded_file = os.path.join(base_path, "%s.untar" % fileext, name, file_lookup) + loaded_file = os.path.join(base_path, f"{fileext}.untar", name, file_lookup) else: mylog.info("Loading %s for %s dataset", specific_file, name) - loaded_file = os.path.join(base_path, "%s.untar" % fileext, name, specific_file) + loaded_file = os.path.join(base_path, f"{fileext}.untar", name, specific_file) return load(loaded_file, **optional_args) @@ -118,7 +118,7 @@ def _validate_sampledata_name(name): # Right now we are assuming that any name passed without an explicit # extension is packed in a tarball. This logic can be modified later to # be more flexible. - fileext = "%s.tar.gz" % name + fileext = f"{name}.tar.gz" basename = name extension = "tar" elif ext == ".gz": diff --git a/yt/utilities/lodgeit.py b/yt/utilities/lodgeit.py index 126f5ee6e6d..4b37601aeca 100644 --- a/yt/utilities/lodgeit.py +++ b/yt/utilities/lodgeit.py @@ -41,7 +41,7 @@ def fail(msg, code): """Bail out with an error message.""" - print("ERROR: %s" % msg, file=sys.stderr) + print(f"ERROR: {msg}", file=sys.stderr) sys.exit(code) @@ -118,7 +118,7 @@ def get_xmlrpc_service(): SERVICE_URL + "xmlrpc/", allow_none=True ) except Exception as err: - fail("Could not connect to Pastebin: %s" % err, -1) + fail(f"Could not connect to Pastebin: {err}", -1) return _xmlrpc_service @@ -202,7 +202,7 @@ def download_paste(uid): xmlrpc = get_xmlrpc_service() paste = xmlrpc.pastes.getPaste(uid) if not paste: - fail('Paste "%s" does not exist.' % uid, 5) + fail(f'Paste "{uid}" does not exist.', 5) code = paste["code"] print(code) @@ -242,9 +242,9 @@ def read_file(f): for fname in filenames: data = read_file(open(fname, "rb")) if langopt: - result.append("### %s [%s]\n\n" % (fname, langopt)) + result.append(f"### {fname} [{langopt}]\n\n") else: - result.append("### %s\n\n" % fname) + result.append(f"### {fname}\n\n") result.append(data) result.append("\n\n") data = "".join(result) @@ -306,7 +306,7 @@ def main( # check language if given if language and not language_exists(language): - print("Language %s is not supported." 
% language) + print(f"Language {language} is not supported.") return # load file(s) @@ -314,14 +314,14 @@ def main( try: data, language, filename, mimetype = compile_paste(args, language) except Exception as err: - fail("Error while reading the file(s): %s" % err, 2) + fail(f"Error while reading the file(s): {err}", 2) if not data: fail("Aborted, no content to paste.", 4) # create paste code = make_utf8(data, encoding).decode("utf-8") pid = create_paste(code, language, filename, mimetype, private) - url = "%sshow/%s/" % (SERVICE_URL, pid) + url = f"{SERVICE_URL}show/{pid}/" print(url) if open_browser: open_webbrowser(url) diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index a993bbe7465..cfb24a48458 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -91,7 +91,7 @@ def _attr_list(self): def _return_filtered_object(self, attrs): new_attrs = tuple(attr for attr in self._attr_list if attr not in attrs) new_class = type( - "Filtered%s" % self.__class__.__name__, + f"Filtered{self.__class__.__name__}", (FilteredRepresentation,), {"_attr_list": new_attrs}, ) diff --git a/yt/utilities/on_demand_imports.py b/yt/utilities/on_demand_imports.py index 1e7370e5f95..059b8be78a9 100644 --- a/yt/utilities/on_demand_imports.py +++ b/yt/utilities/on_demand_imports.py @@ -13,8 +13,7 @@ class NotAModule: def __init__(self, pkg_name): self.pkg_name = pkg_name self.error = ImportError( - "This functionality requires the %s " - "package to be installed." % self.pkg_name + f"This functionality requires the {self.pkg_name} package to be installed." ) def __getattr__(self, item): diff --git a/yt/utilities/parallel_tools/parallel_analysis_interface.py b/yt/utilities/parallel_tools/parallel_analysis_interface.py index b19075763f4..a6a5ce04322 100644 --- a/yt/utilities/parallel_tools/parallel_analysis_interface.py +++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py @@ -49,10 +49,10 @@ def filter(self, record): def traceback_writer_hook(file_suffix=""): def write_to_file(exc_type, exc, tb): sys.__excepthook__(exc_type, exc, tb) - fn = "yt_traceback%s" % file_suffix + fn = f"yt_traceback{file_suffix}" with open(fn, "w") as fhandle: traceback.print_exception(exc_type, exc, tb, file=fhandle) - print("Wrote traceback to %s" % fn) + print(f"Wrote traceback to {fn}") MPI.COMM_WORLD.Abort(1) return write_to_file diff --git a/yt/utilities/parameter_file_storage.py b/yt/utilities/parameter_file_storage.py index db8a6f7e41a..daf08f0d531 100644 --- a/yt/utilities/parameter_file_storage.py +++ b/yt/utilities/parameter_file_storage.py @@ -22,10 +22,10 @@ def __init__(self, name): self.name = name def __str__(self): - return "%s" % self.name + return f"{self.name}" def __repr__(self): - return "%s" % self.name + return f"{self.name}" class ParameterFileStore: @@ -86,7 +86,7 @@ def _get_db_name(self): base_file_name = ytcfg.get("yt", "ParameterFileStore") if not os.access(os.path.expanduser("~/"), os.W_OK): return os.path.abspath(base_file_name) - return os.path.expanduser("~/.yt/%s" % base_file_name) + return os.path.expanduser(f"~/.yt/{base_file_name}") def get_ds_hash(self, hash): """ This returns a dataset based on a hash. 
""" @@ -174,7 +174,7 @@ def _write_out(self): if self._read_only: return fn = self._get_db_name() - f = open("%s.tmp" % fn, "wb") + f = open(f"{fn}.tmp", "wb") w = csv.DictWriter(f, _field_names) maxn = ytcfg.getint("yt", "maximumstoreddatasets") # number written for h, v in islice( @@ -183,7 +183,7 @@ def _write_out(self): v["hash"] = h w.writerow(v) f.close() - os.rename("%s.tmp" % fn, fn) + os.rename(f"{fn}.tmp", fn) @parallel_simple_proxy def read_db(self): diff --git a/yt/utilities/particle_generator.py b/yt/utilities/particle_generator.py index 30e81e3f877..d33511f9a58 100644 --- a/yt/utilities/particle_generator.py +++ b/yt/utilities/particle_generator.py @@ -22,7 +22,7 @@ def __init__(self, ds, num_particles, field_list, ptype="io"): ] self.field_list.append((ptype, "particle_index")) self.field_units = dict( - ((ptype, "particle_position_%s" % ax), "code_length") for ax in "xyz" + ((ptype, f"particle_position_{ax}"), "code_length") for ax in "xyz" ) self.field_units[ptype, "particle_index"] = "" self.ptype = ptype diff --git a/yt/utilities/performance_counters.py b/yt/utilities/performance_counters.py index 392cb00b5bf..64126161259 100644 --- a/yt/utilities/performance_counters.py +++ b/yt/utilities/performance_counters.py @@ -129,8 +129,8 @@ def write_out(self, filename_prefix): ytcfg.getint("yt", "__global_parallel_size"), ) else: - pfn = "%s" % (filename_prefix) + pfn = f"{filename_prefix}" for n, p in sorted(self.profilers.items()): - fn = "%s_%s.cprof" % (pfn, n) + fn = f"{pfn}_{n}.cprof" mylog.info("Dumping %s into %s", n, fn) p.dump_stats(fn) diff --git a/yt/utilities/periodic_table.py b/yt/utilities/periodic_table.py index ba71454a8bf..db36b8baa69 100644 --- a/yt/utilities/periodic_table.py +++ b/yt/utilities/periodic_table.py @@ -134,7 +134,7 @@ def __init__(self, num, weight, name, symbol): self.symbol = symbol def __repr__(self): - return "Element: %s (%s)" % (self.symbol, self.name) + return f"Element: {self.symbol} ({self.name})" class PeriodicTable: diff --git a/yt/utilities/rpdb.py b/yt/utilities/rpdb.py index 3d6d9072f23..eca559586cf 100644 --- a/yt/utilities/rpdb.py +++ b/yt/utilities/rpdb.py @@ -40,9 +40,7 @@ def rpdb_excepthook(exc_type, exc, tb): traceback.print_exception(exc_type, exc, tb) task = ytcfg.getint("yt", "__global_parallel_rank") size = ytcfg.getint("yt", "__global_parallel_size") - print( - "Starting RPDB server on task %s ; connect with 'yt rpdb -t %s'" % (task, task) - ) + print(f"Starting RPDB server on task {task} ; connect with 'yt rpdb -t {task}'") handler = pdb_handler(tb) server = PdbXMLRPCServer(("localhost", 8010 + task)) server.register_introspection_functions() @@ -92,7 +90,7 @@ def do_shutdown(self, args): return True def do_help(self, line): - print(self.proxy.execute("help %s" % line)) + print(self.proxy.execute(f"help {line}")) def postcmd(self, stop, line): return stop @@ -119,7 +117,7 @@ def run_rpdb(task=None): except Exception: pass port += task - sp = ServerProxy("http://localhost:%s/" % port) + sp = ServerProxy(f"http://localhost:{port}/") try: pp = rpdb_cmd(sp) except socket.error: diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index 4d0b225cc47..8df3933a4bc 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -312,13 +312,13 @@ def __init__(self, filename=None, header=None): def write(self, filename): f = open(filename, "w") f.write("# SDF 1.0\n") - f.write("parameter byteorder = %s;\n" % (self.parameters["byteorder"])) + f.write(f"parameter byteorder = {self.parameters['byteorder']};\n") for c in 
self.comments: if "\x0c" in c: continue if "SDF 1.0" in c: continue - f.write("%s" % c) + f.write(f"{c}") for k, v in sorted(self.parameters.items()): if k == "byteorder": continue @@ -327,9 +327,9 @@ def write(self, filename): except Exception: t = type(v).__name__ if t == str.__name__: - f.write('parameter %s = "%s";\n' % (k, v)) + f.write(f'parameter {k} = "{v}";\n') else: - f.write("%s %s = %s;\n" % (t, k, v)) + f.write(f"{t} {k} = {v};\n") struct_order = [] for s in self.structs: @@ -338,7 +338,7 @@ def write(self, filename): for var in s.dtype.descr: k, v = var[0], _rev_types[var[1]] to_write.append(k) - f.write("\t%s %s;\n" % (v, k)) + f.write(f"\t{v} {k};\n") f.write("}[%i];\n" % s.size) struct_order.append(to_write) f.write("#\x0c\n") @@ -346,13 +346,13 @@ def write(self, filename): return struct_order, f def __repr__(self): - disp = " file: %s\n" % self.filename + disp = f" file: {self.filename}\n" disp += "parameters: \n" for k, v in self.parameters.items(): - disp += "\t%s: %s\n" % (k, v) + disp += f"\t{k}: {v}\n" disp += "arrays: \n" for k, v in self.items(): - disp += "\t%s[%s]\n" % (k, v.size) + disp += f"\t{k}[{v.size}]\n" return disp def parse_header(self): @@ -394,12 +394,12 @@ def parse_line(self, line, ascfile): vtype = "str" try: - vval = eval("np." + vtype + "(%s)" % vval) + vval = eval("np." + vtype + f"({vval})") except AttributeError: if vtype not in _types: mylog.warning("Skipping parameter %s", vname) return - vval = eval("np." + _types[vtype] + "(%s)" % vval) + vval = eval("np." + _types[vtype] + f"({vval})") self.parameters[vname] = vval diff --git a/yt/utilities/tests/test_config.py b/yt/utilities/tests/test_config.py index 388fc8aad33..8d46d78b7bc 100644 --- a/yt/utilities/tests/test_config.py +++ b/yt/utilities/tests/test_config.py @@ -122,7 +122,7 @@ def setUp(self): with open(_OLD_CONFIG_FILE, "w") as fh: for line in _DUMMY_CFG: - fh.write("{}\n".format(line)) + fh.write(f"{line}\n") if os.path.exists(CURRENT_CONFIG_FILE): os.remove(CURRENT_CONFIG_FILE) diff --git a/yt/utilities/tests/test_cosmology.py b/yt/utilities/tests/test_cosmology.py index f35fe4072f6..381409c53d8 100644 --- a/yt/utilities/tests/test_cosmology.py +++ b/yt/utilities/tests/test_cosmology.py @@ -126,9 +126,7 @@ def t_from_z_analytic(z, hubble_constant=0.7, omega_matter=0.3, omega_lambda=0.7 ) else: - raise NotImplementedError( - "%s, %s, %s" % (hubble_constant, omega_matter, omega_lambda) - ) + raise NotImplementedError(f"{hubble_constant}, {omega_matter}, {omega_lambda}") # Now convert from Time * H0 to time. @@ -179,8 +177,7 @@ def test_z_t_analytic(): t_an, t_co, 4, - err_msg="t_from_z does not match analytic version for cosmology %s." - % cosmo, + err_msg=f"t_from_z does not match analytic version for cosmology {cosmo}.", ) # random sample in log(t/t0) from -3 to 1 @@ -195,8 +192,7 @@ def test_z_t_analytic(): 1 / (1 + z_an), 1 / (1 + z_co), 5, - err_msg="z_from_t does not match analytic version for cosmology %s." 
- % cosmo, + err_msg=f"z_from_t does not match analytic version for cosmology {cosmo}.", ) diff --git a/yt/visualization/base_plot_types.py b/yt/visualization/base_plot_types.py index 4bfc142857a..12d1227eed2 100644 --- a/yt/visualization/base_plot_types.py +++ b/yt/visualization/base_plot_types.py @@ -142,7 +142,7 @@ def save(self, name, mpl_kwargs=None, canvas=None): suffix = get_image_suffix(name) if suffix == "": suffix = ".png" - name = "%s%s" % (name, suffix) + name = f"{name}{suffix}" mylog.info("Saving plot %s", name) diff --git a/yt/visualization/color_maps.py b/yt/visualization/color_maps.py index aaa70508477..d79365c2019 100644 --- a/yt/visualization/color_maps.py +++ b/yt/visualization/color_maps.py @@ -490,7 +490,7 @@ def add_colormap(name, cdict): # Add colormaps from cmocean, if it's installed if cmocean is not None: cmo_cmapnames = cmocean.cm.cmapnames - cmo_cmapnames += ["%s_r" % name for name in cmo_cmapnames] + cmo_cmapnames += [f"{name}_r" for name in cmo_cmapnames] for cmname in cmo_cmapnames: cm = getattr(cmocean.cm, cmname) # cmocean has a colormap named 'algae', so let's avoid overwriting diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index 9f2ddc1ef56..0e77f9428ac 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -415,18 +415,18 @@ def axis_box_yt( if data.axis != 4: xi = plot.ds.coordinates.x_axis[data.axis] x_name = plot.ds.coordinates.axis_name[xi] - _xlabel = "%s (%s)" % (x_name, units) + _xlabel = f"{x_name} ({units})" else: - _xlabel = "x (%s)" % (units) + _xlabel = f"x ({units})" if ylabel is not None: _ylabel = ylabel else: if data.axis != 4: yi = plot.ds.coordinates.y_axis[data.axis] y_name = plot.ds.coordinates.axis_name[yi] - _ylabel = "%s (%s)" % (y_name, units) + _ylabel = f"{y_name} ({units})" else: - _ylabel = "y (%s)" % (units) + _ylabel = f"y ({units})" if tickcolor is None: _tickcolor = pyx.color.cmyk.white elif isinstance(plot, ProfilePlot): @@ -724,7 +724,7 @@ def colorbar( size = (self.figsize[0], 0.1 * self.figsize[1]) imsize = (256, 1) else: - raise RuntimeError("orientation %s unknown" % orientation) + raise RuntimeError(f"orientation {orientation} unknown") return # If shrink is a scalar, then convert into tuple @@ -1153,7 +1153,7 @@ def save_fig(self, filename="test", format="eps", resolution=250): elif format == "jpg": self.canvas.writeGSfile(filename + ".jpeg", "jpeg", resolution=resolution) else: - raise RuntimeError("format %s unknown." % (format)) + raise RuntimeError(f"format {format} unknown.") # ============================================================================= @@ -1425,7 +1425,7 @@ def multiplot( if isinstance(cb_location, dict): if fields[index] not in cb_location.keys(): raise RuntimeError( - "%s not found in cb_location dict" % fields[index] + f"{fields[index]} not found in cb_location dict" ) return orientation = cb_location[fields[index]] diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index 7baba6435a3..14e86420e62 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -32,7 +32,7 @@ def data(self): def __repr__(self): im_shape = " x ".join([str(s) for s in self.shape]) - return "FITSImage: %s (%s, %s)" % (self.name, im_shape, self.units) + return f"FITSImage: {self.name} ({im_shape}, {self.units})" class FITSImageData: @@ -240,10 +240,9 @@ def __init__( ftype, fname = fields[i].name else: raise RuntimeError( - "Cannot distinguish between fields " - "with same name %s!" 
% fd + f"Cannot distinguish between fields with same name {fd}!" ) - self.fields[i] = "%s_%s" % (ftype, fname) + self.fields[i] = f"{ftype}_{fname}" first = True for i, name, field in zip(count(), self.fields, fields): @@ -283,11 +282,11 @@ def __init__( short_unit = "bf" else: short_unit = unit[0] - key = "{}unit".format(short_unit) - value = getattr(self, "{}_unit".format(unit)) + key = f"{short_unit}unit" + value = getattr(self, f"{unit}_unit") if value is not None: hdu.header[key] = float(value.value) - hdu.header.comments[key] = "[%s]" % value.units + hdu.header.comments[key] = f"[{value.units}]" hdu.header["time"] = float(self.current_time.value) self.hdulist.append(hdu) @@ -489,7 +488,7 @@ def convolve(self, field, kernel, **kwargs): raise RuntimeError("Convolution currently only works for 2D FITSImageData!") conv = _astropy.conv if field not in self.keys(): - raise KeyError("%s not an image!" % field) + raise KeyError(f"{field} not an image!") idx = self.fields.index(field) if not isinstance(kernel, conv.Kernel): if not isinstance(kernel, numeric_type): @@ -514,7 +513,7 @@ def update_header(self, field, key, value): img.header[key] = value else: if field not in self.keys(): - raise KeyError("%s not an image!" % field) + raise KeyError(f"{field} not an image!") idx = self.fields.index(field) self.hdulist[idx].header[key] = value @@ -574,7 +573,7 @@ def info(self, output=None): name = "(No file associated with this FITSImageData)" else: name = self.hdulist._file.name - results = ["Filename: {}".format(name), header] + results = [f"Filename: {name}", header] for line in hinfo: units = self.field_units[self.hdulist[line[0]].header["btype"]] summary = tuple(list(line[:-1]) + [units]) @@ -673,7 +672,7 @@ def set_unit(self, field, units): Set the units of *field* to *units*. """ if field not in self.keys(): - raise KeyError("%s not an image!" % field) + raise KeyError(f"{field} not an image!") idx = self.fields.index(field) new_data = YTArray(self.hdulist[idx].data, self.field_units[field]).to(units) self.hdulist[idx].data = new_data.v @@ -687,7 +686,7 @@ def pop(self, key): instance. """ if key not in self.keys(): - raise KeyError("%s not an image!" % key) + raise KeyError(f"{key} not an image!") idx = self.fields.index(key) im = self.hdulist.pop(idx) self.field_units.pop(key) @@ -790,8 +789,7 @@ def create_sky_wcs( scaleq = YTQuantity(sky_scale[0], sky_scale[1]) if scaleq.units.dimensions != dimensions.angle / dimensions.length: raise RuntimeError( - "sky_scale %s not in correct " % sky_scale - + "dimensions of angle/length!" + f"sky_scale {sky_scale} not in correct " + "dimensions of angle/length!" 
) deltas = old_wcs.wcs.cdelt units = [str(unit) for unit in old_wcs.wcs.cunit] diff --git a/yt/visualization/fixed_resolution.py b/yt/visualization/fixed_resolution.py index 05e3f7378ff..256e4b3df3e 100644 --- a/yt/visualization/fixed_resolution.py +++ b/yt/visualization/fixed_resolution.py @@ -514,7 +514,7 @@ def save_as_dataset(self, filename=None, fields=None): """ - keyword = "%s_%s_frb" % (str(self.ds), self.data_source._type_name) + keyword = f"{str(self.ds)}_{self.data_source._type_name}_frb" filename = get_output_filename(filename, keyword, ".h5") data = {} diff --git a/yt/visualization/image_writer.py b/yt/visualization/image_writer.py index 4975d521761..1aa58ee18b9 100644 --- a/yt/visualization/image_writer.py +++ b/yt/visualization/image_writer.py @@ -311,8 +311,8 @@ def strip_colormap_data( cmaps = [cmaps] for cmap_name in sorted(cmaps): vals = rcm._extract_lookup_table(cmap_name) - f.write("### %s ###\n\n" % (cmap_name)) - f.write("color_map_luts['%s'] = \\\n" % (cmap_name)) + f.write(f"### {cmap_name} ###\n\n") + f.write(f"color_map_luts['{cmap_name}'] = \\\n") f.write(" (\n") for v in vals: f.write(pprint.pformat(v, indent=3)) @@ -443,7 +443,7 @@ def write_projection( if suffix == "": suffix = ".png" - filename = "%s%s" % (filename, suffix) + filename = f"{filename}{suffix}" mylog.info("Saving plot %s", filename) if suffix == ".pdf": canvas = FigureCanvasPdf(fig) diff --git a/yt/visualization/mapserver/pannable_map.py b/yt/visualization/mapserver/pannable_map.py index a5a2db0d601..b9d8f236207 100644 --- a/yt/visualization/mapserver/pannable_map.py +++ b/yt/visualization/mapserver/pannable_map.py @@ -36,16 +36,16 @@ def __init__(self, data, field, takelog, cmap, route_prefix=""): self.field = field self.cmap = cmap - bottle.route("%s/map/:field/:L/:x/:y.png" % route_prefix)(self.map) - bottle.route("%s/map/:field/:L/:x/:y.png" % route_prefix)(self.map) - bottle.route("%s/" % route_prefix)(self.index) - bottle.route("%s/:field" % route_prefix)(self.index) - bottle.route("%s/index.html" % route_prefix)(self.index) - bottle.route("%s/list" % route_prefix, "GET")(self.list_fields) + bottle.route(f"{route_prefix}/map/:field/:L/:x/:y.png")(self.map) + bottle.route(f"{route_prefix}/map/:field/:L/:x/:y.png")(self.map) + bottle.route(f"{route_prefix}/")(self.index) + bottle.route(f"{route_prefix}/:field")(self.index) + bottle.route(f"{route_prefix}/index.html")(self.index) + bottle.route(f"{route_prefix}/list", "GET")(self.list_fields) # This is a double-check, since we do not always mandate this for # slices: self.data[self.field] = self.data[self.field].astype("float64") - bottle.route("%s/static/:path" % route_prefix, "GET")(self.static) + bottle.route(f"{route_prefix}/static/:path", "GET")(self.static) self.takelog = takelog self._lock = False @@ -123,7 +123,7 @@ def index(self, field=None): def static(self, path): if path[-4:].lower() in (".png", ".gif", ".jpg"): - bottle.response.headers["Content-Type"] = "image/%s" % (path[-3:].lower()) + bottle.response.headers["Content-Type"] = f"image/{path[-3:].lower()}" elif path[-4:].lower() == ".css": bottle.response.headers["Content-Type"] = "text/css" elif path[-3:].lower() == ".js": @@ -137,8 +137,8 @@ def list_fields(self): # Add deposit fields (only cic + density for now) for ptype in self.ds.particle_types: d[ptype] = [ - (("deposit", "%s_cic" % ptype), False), - (("deposit", "%s_density" % ptype), False), + (("deposit", f"{ptype}_cic"), False), + (("deposit", f"{ptype}_density"), False), ] # Add fluid fields (only gas for now) 
diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index abdda1bd9a6..4cd582aec5d 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -518,12 +518,12 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): if isinstance(k, tuple): k = k[1] if axis: - n = "%s_%s_%s_%s" % (name, type, axis, k.replace(" ", "_")) + n = f"{name}_{type}_{axis}_{k.replace(' ', '_')}" else: # for cutting planes - n = "%s_%s_%s" % (name, type, k.replace(" ", "_")) + n = f"{name}_{type}_{k.replace(' ', '_')}" if weight: - n += "_%s" % (weight) + n += f"_{weight}" if suffix != "": n = ".".join([n, suffix]) names.append(v.save(n, mpl_kwargs)) @@ -633,7 +633,7 @@ def _get_axes_unit_labels(self, unit_x, unit_y): # This *forces* an override unn = self.ds.coordinates.image_units[self.data_source.axis][i] elif hasattr(self.ds.coordinates, "default_unit_label"): - axax = getattr(self.ds.coordinates, "%s_axis" % ("xy"[i]))[ + axax = getattr(self.ds.coordinates, f"{'xy'[i]}_axis")[ self.data_source.axis ] unn = self.ds.coordinates.default_unit_label.get(axax, None) diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index 5f79e4abbce..d3be70752c8 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -362,8 +362,8 @@ def __call__(self, plot): else: # for other cases (even for cylindrical geometry), # orthogonal planes are generically Cartesian - xv = "velocity_%s" % axis_names[xax] - yv = "velocity_%s" % axis_names[yax] + xv = f"velocity_{axis_names[xax]}" + yv = f"velocity_{axis_names[yax]}" qcb = QuiverCallback( xv, @@ -439,8 +439,8 @@ def __call__(self, plot): else: # for other cases (even for cylindrical geometry), # orthogonal planes are generically Cartesian - xv = "magnetic_field_%s" % axis_names[xax] - yv = "magnetic_field_%s" % axis_names[yax] + xv = f"magnetic_field_{axis_names[xax]}" + yv = f"magnetic_field_{axis_names[yax]}" qcb = QuiverCallback( xv, @@ -507,7 +507,7 @@ def _transformed_field(field, data): return data[field_name] - data.ds.arr(vector_value, field_units) plot.data.ds.add_field( - ("gas", "transformed_%s" % field_name), + ("gas", f"transformed_{field_name}"), sampling_type="cell", function=_transformed_field, units=field_units, @@ -518,8 +518,8 @@ def _transformed_field(field, data): # We create a relative vector field transform(self.field_x, self.bv_x) transform(self.field_y, self.bv_y) - field_x = "transformed_%s" % self.field_x - field_y = "transformed_%s" % self.field_y + field_x = f"transformed_{self.field_x}" + field_y = f"transformed_{self.field_y}" else: field_x, field_y = self.field_x, self.field_y @@ -1216,8 +1216,8 @@ def __call__(self, plot): xf = plot.data.ds.coordinates.axis_name[px_index] yf = plot.data.ds.coordinates.axis_name[py_index] - dxf = "d%s" % xf - dyf = "d%s" % yf + dxf = f"d{xf}" + dyf = f"d{yf}" ny, nx = plot.image._A.shape buff = np.zeros((nx, ny), dtype="float64") @@ -1230,7 +1230,7 @@ def __call__(self, plot): ftype = "grid" else: raise RuntimeError( - "Unknown field type for object of type %s." % type(clump) + f"Unknown field type for object of type {type(clump)}." 
) xf_copy = clump[ftype, xf].copy().in_units("code_length") @@ -1928,9 +1928,9 @@ def __call__(self, plot): axis_names = plot.data.ds.coordinates.axis_name xax = plot.data.ds.coordinates.x_axis[data.axis] yax = plot.data.ds.coordinates.y_axis[data.axis] - field_x = "%s_%s" % (self.center_field_prefix, axis_names[xax]) - field_y = "%s_%s" % (self.center_field_prefix, axis_names[yax]) - field_z = "%s_%s" % (self.center_field_prefix, axis_names[data.axis]) + field_x = f"{self.center_field_prefix}_{axis_names[xax]}" + field_y = f"{self.center_field_prefix}_{axis_names[yax]}" + field_z = f"{self.center_field_prefix}_{axis_names[data.axis]}" # Set up scales for pixel size and original data pixel_scale = self._pixel_scale(plot)[0] @@ -1987,7 +1987,7 @@ def __call__(self, plot): if self.annotate_field: annotate_dat = halo_data[self.annotate_field] - texts = ["{:g}".format(float(dat)) for dat in annotate_dat] + texts = [f"{float(dat):g}" for dat in annotate_dat] labels = [] for pos_x, pos_y, t in zip(px, py, texts): labels.append(plot._axes.text(pos_x, pos_y, t, **self.text_args)) @@ -2066,8 +2066,8 @@ def __call__(self, plot): xax = plot.data.ds.coordinates.x_axis[ax] yax = plot.data.ds.coordinates.y_axis[ax] axis_names = plot.data.ds.coordinates.axis_name - field_x = "particle_position_%s" % axis_names[xax] - field_y = "particle_position_%s" % axis_names[yax] + field_x = f"particle_position_{axis_names[xax]}" + field_y = f"particle_position_{axis_names[yax]}" pt = self.ptype self.periodic_x = plot.data.ds.periodicity[xax] self.periodic_y = plot.data.ds.periodicity[yax] diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 834483cd74f..6378f185a44 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -359,8 +359,7 @@ def pan(self, deltas): """ if len(deltas) != 2: raise RuntimeError( - "The pan function accepts a two-element sequence.\n" - "Received %s." % (deltas,) + f"The pan function accepts a two-element sequence.\nReceived {deltas}." ) if isinstance(deltas[0], Number) and isinstance(deltas[1], Number): deltas = ( @@ -426,8 +425,7 @@ def set_unit(self, field, new_unit, equivalency=None, equivalency_kwargs=None): new_unit = ensure_list(new_unit) if len(field) > 1 and len(new_unit) != len(field): raise RuntimeError( - "Field list {} and unit " - "list {} are incompatible".format(field, new_unit) + f"Field list {field} and unit list {new_unit} are incompatible" ) for f, u in zip(field, new_unit): self.frb.set_unit(f, u, equivalency, equivalency_kwargs) @@ -966,7 +964,7 @@ def _setup_plots(self): "values. Max = %f." % (f, np.nanmax(image)) ) elif not np.any(np.isfinite(image)): - msg = "Plot image for field %s is filled with NaNs." % (f,) + msg = f"Plot image for field {f} is filled with NaNs." elif np.nanmax(image) > 0.0 and np.nanmin(image) < 0: msg = ( "Plot image for field %s has both positive " @@ -1520,7 +1518,7 @@ def __init__( slc = ds.all_data() slc.axis = axis if slc.axis != ds.parameters["axis"]: - raise RuntimeError("Original slice axis is %s." % ds.parameters["axis"]) + raise RuntimeError(f"Original slice axis is {ds.parameters['axis']}.") else: slc = ds.slice( axis, @@ -1751,7 +1749,7 @@ def __init__( proj.axis = axis if proj.axis != ds.parameters["axis"]: raise RuntimeError( - "Original projection axis is %s." % ds.parameters["axis"] + f"Original projection axis is {ds.parameters['axis']}." 
) if weight_field is not None: proj.weight_field = proj._determine_fields(weight_field)[0] @@ -2510,9 +2508,7 @@ def plot_2d( axis = "phi" else: raise NotImplementedError( - "plot_2d does not yet support datasets with {} geometries".format( - ds.geometry - ) + f"plot_2d does not yet support datasets with {ds.geometry} geometries" ) # Part of the convenience of plot_2d is to eliminate the use of the # superfluous coordinate, so we do that also with the center argument diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index 87d5e2c2c27..35936486d1f 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -289,14 +289,14 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): iters = self.plots.items() if not suffix: suffix = "png" - suffix = ".%s" % suffix + suffix = f".{suffix}" fullname = False if name is None: if len(self.profiles) == 1: prefix = self.profiles[0].ds else: prefix = "Multi-data" - name = "%s%s" % (prefix, suffix) + name = f"{prefix}{suffix}" else: sfx = get_image_suffix(name) if sfx != "": @@ -313,9 +313,9 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): if isinstance(uid, tuple): uid = uid[1] if fullname: - fns.append("%s%s" % (prefix, suffix)) + fns.append(f"{prefix}{suffix}") else: - fns.append("%s_1d-Profile_%s_%s%s" % (prefix, xfn, uid, suffix)) + fns.append(f"{prefix}_1d-Profile_{xfn}_{uid}{suffix}") mylog.info("Saving %s", fns[-1]) with matplotlib_style_context(): plot.save(fns[-1], mpl_kwargs=mpl_kwargs) @@ -548,7 +548,7 @@ def set_log(self, field, log): elif field in self.profiles[0].field_data: self.y_log[field] = log else: - raise KeyError("Field %s not in profile plot!" % (field)) + raise KeyError(f"Field {field} not in profile plot!") return self @invalidate_plot @@ -571,7 +571,7 @@ def set_ylabel(self, field, label): if field in self.profiles[0].field_data: self.y_title[field] = label else: - raise KeyError("Field %s not in profile plot!" % (field)) + raise KeyError(f"Field {field} not in profile plot!") return self @@ -607,7 +607,7 @@ def set_unit(self, field, unit): elif fd[1] in self.profiles[0].field_map: profile.set_field_unit(field, unit) else: - raise KeyError("Field %s not in profile plot!" % (field)) + raise KeyError(f"Field {field} not in profile plot!") return self @invalidate_plot @@ -1292,7 +1292,7 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): _f = f if isinstance(f, tuple): _f = _f[1] - middle = "2d-Profile_%s_%s_%s" % (xfn, yfn, _f) + middle = f"2d-Profile_{xfn}_{yfn}_{_f}" splitname = os.path.split(name) if splitname[0] != "" and not os.path.isdir(splitname[0]): os.makedirs(splitname[0]) @@ -1309,7 +1309,7 @@ def save(self, name=None, suffix=None, mpl_kwargs=None): return names else: suffix = "png" - fn = "%s_%s.%s" % (prefix, middle, suffix) + fn = f"{prefix}_{middle}.{suffix}" names.append(fn) self.plots[f].save(fn, mpl_kwargs) return names @@ -1451,7 +1451,7 @@ def set_log(self, field, log): elif field in p.field_data: self.z_log[field] = log else: - raise KeyError("Field %s not in phase plot!" % (field)) + raise KeyError(f"Field {field} not in phase plot!") return self @invalidate_plot @@ -1475,7 +1475,7 @@ def set_unit(self, field, unit): self.profile.set_field_unit(field, unit) self.plots[field].zmin, self.plots[field].zmax = (None, None) else: - raise KeyError("Field %s not in phase plot!" 
% (field)) + raise KeyError(f"Field {field} not in phase plot!") return self @invalidate_plot diff --git a/yt/visualization/tests/test_geo_projections.py b/yt/visualization/tests/test_geo_projections.py index 96a8587a28c..4ed175dd2cd 100644 --- a/yt/visualization/tests/test_geo_projections.py +++ b/yt/visualization/tests/test_geo_projections.py @@ -28,7 +28,7 @@ def slice_image(filename_prefix): image_file = sl.save(filename_prefix) return image_file - slice_image.__name__ = "slice_{}".format(test_prefix) + slice_image.__name__ = f"slice_{test_prefix}" test = GenericImageTest(ds, slice_image, decimals) test.prefix = test_prefix test.answer_name = test_name @@ -47,7 +47,7 @@ def test_geo_slices_amr(): # avoid crashes, see https://github.com/SciTools/cartopy/issues/1177 continue for field in ds.field_list: - prefix = "%s_%s_%s" % (field[0], field[1], transform) + prefix = f"{field[0]}_{field[1]}_{transform}" yield compare( ds, field, diff --git a/yt/visualization/tests/test_line_plots.py b/yt/visualization/tests/test_line_plots.py index 853ca8a6df9..d1a5b92644f 100644 --- a/yt/visualization/tests/test_line_plots.py +++ b/yt/visualization/tests/test_line_plots.py @@ -18,7 +18,7 @@ def compare(ds, plot, test_prefix, test_name, decimals=12): def image_from_plot(filename_prefix): return plot.save(filename_prefix) - image_from_plot.__name__ = "line_{}".format(test_prefix) + image_from_plot.__name__ = f"line_{test_prefix}" test = GenericImageTest(ds, image_from_plot, decimals) test.prefix = test_prefix test.answer_name = test_name diff --git a/yt/visualization/tests/test_mesh_slices.py b/yt/visualization/tests/test_mesh_slices.py index 7da7a05b1cd..e5ef798c850 100644 --- a/yt/visualization/tests/test_mesh_slices.py +++ b/yt/visualization/tests/test_mesh_slices.py @@ -30,7 +30,7 @@ def slice_image(filename_prefix): image_file = sl.save(filename_prefix) return image_file - slice_image.__name__ = "slice_{}".format(test_prefix) + slice_image.__name__ = f"slice_{test_prefix}" test = GenericImageTest(ds, slice_image, decimals) test.prefix = test_prefix test.answer_name = test_name @@ -41,7 +41,7 @@ def slice_image(filename_prefix): def test_mesh_slices_amr(): ds = fake_amr_ds() for field in ds.field_list: - prefix = "%s_%s_%s" % (field[0], field[1], 0) + prefix = f"{field[0]}_{field[1]}_{0}" yield compare(ds, field, 0, test_prefix=prefix, test_name="mesh_slices_amr") @@ -54,7 +54,7 @@ def test_mesh_slices_tetrahedral(): for field in ds.field_list: for idir in [0, 1, 2]: - prefix = "%s_%s_%s" % (field[0], field[1], idir) + prefix = f"{field[0]}_{field[1]}_{idir}" yield compare( ds, field, @@ -78,7 +78,7 @@ def test_mesh_slices_hexahedral(): for field in ds.field_list: for idir in [0, 1, 2]: - prefix = "%s_%s_%s" % (field[0], field[1], idir) + prefix = f"{field[0]}_{field[1]}_{idir}" yield compare( ds, field, diff --git a/yt/visualization/tests/test_plotwindow.py b/yt/visualization/tests/test_plotwindow.py index 3de3037eebc..f75f89a763d 100644 --- a/yt/visualization/tests/test_plotwindow.py +++ b/yt/visualization/tests/test_plotwindow.py @@ -155,7 +155,7 @@ def simple_streamlines(test_obj, plot): yax = test_obj.ds.coordinates.y_axis[ax] xn = test_obj.ds.coordinates.axis_name[xax] yn = test_obj.ds.coordinates.axis_name[yax] - plot.annotate_streamlines("velocity_%s" % xn, "velocity_%s" % yn) + plot.annotate_streamlines(f"velocity_{xn}", f"velocity_{yn}") CALLBACK_TESTS = ( diff --git a/yt/visualization/tests/test_profile_plots.py b/yt/visualization/tests/test_profile_plots.py index ad5c810b974..28b7122d5ef 
100644 --- a/yt/visualization/tests/test_profile_plots.py +++ b/yt/visualization/tests/test_profile_plots.py @@ -36,7 +36,7 @@ def compare(ds, plot, test_prefix, test_name, decimals=12): def image_from_plot(filename_prefix): return plot.save(filename_prefix) - image_from_plot.__name__ = "profile_{}".format(test_prefix) + image_from_plot.__name__ = f"profile_{test_prefix}" test = GenericImageTest(ds, image_from_plot, decimals) test.prefix = test_prefix test.answer_name = test_name @@ -64,7 +64,7 @@ def test_phase_plot_attributes(): test = PhasePlotAttributeTest( ds, x_field, y_field, z_field, attr_name, args, decimals ) - test.prefix = "%s_%s" % (attr_name, args) + test.prefix = f"{attr_name}_{args}" test.answer_name = "phase_plot_attributes" yield test @@ -98,7 +98,7 @@ def test_profile_plot(): ) profiles[0]._repr_html_() for idx, plot in enumerate(profiles): - test_prefix = "%s_%s" % (plot.plots.keys(), idx) + test_prefix = f"{plot.plots.keys()}_{idx}" yield compare(test_ds, plot, test_prefix=test_prefix, test_name="profile_plots") @@ -146,7 +146,7 @@ def test_phase_plot(): phases.append(pp) phases[0]._repr_html_() for idx, plot in enumerate(phases): - test_prefix = "%s_%s" % (plot.plots.keys(), idx) + test_prefix = f"{plot.plots.keys()}_{idx}" yield compare(test_ds, plot, test_prefix=test_prefix, test_name="phase_plots") diff --git a/yt/visualization/tests/test_raw_field_slices.py b/yt/visualization/tests/test_raw_field_slices.py index c23aecc8dcd..29654ec7cfb 100644 --- a/yt/visualization/tests/test_raw_field_slices.py +++ b/yt/visualization/tests/test_raw_field_slices.py @@ -20,7 +20,7 @@ def slice_image(filename_prefix): image_file = sl.save(filename_prefix) return image_file - slice_image.__name__ = "slice_{}".format(test_prefix) + slice_image.__name__ = f"slice_{test_prefix}" test = GenericImageTest(ds, slice_image, decimals) test.prefix = test_prefix return test @@ -44,4 +44,4 @@ def slice_image(filename_prefix): def test_raw_field_slices(): ds = data_dir_load(raw_fields) for field in _raw_field_names: - yield compare(ds, field, "answers_raw_%s" % field[1]) + yield compare(ds, field, f"answers_raw_{field[1]}") diff --git a/yt/visualization/volume_rendering/camera.py b/yt/visualization/volume_rendering/camera.py index d067f1fd62f..ed8fa01f408 100644 --- a/yt/visualization/volume_rendering/camera.py +++ b/yt/visualization/volume_rendering/camera.py @@ -35,7 +35,7 @@ def _sanitize_camera_property_units(value, scene): return scene.arr([scene.arr(v[0], v[1]) for v in value]) else: raise RuntimeError( - "Cannot set camera width to invalid value '%s'" % (value,) + f"Cannot set camera width to invalid value '{value}'" ) return scene.arr(value, "unitary") else: @@ -43,7 +43,7 @@ def _sanitize_camera_property_units(value, scene): return scene.arr([value.d] * 3, value.units).in_units("unitary") elif isinstance(value, numeric_type): return scene.arr([value] * 3, "unitary") - raise RuntimeError("Cannot set camera width to invalid value '%s'" % (value,)) + raise RuntimeError(f"Cannot set camera width to invalid value '{value}'") class Camera(Orientation): @@ -763,5 +763,5 @@ def __repr__(self): self.light, self.resolution, ) - disp += "Lens: %s" % self.lens + disp += f"Lens: {self.lens}" return disp diff --git a/yt/visualization/volume_rendering/image_handling.py b/yt/visualization/volume_rendering/image_handling.py index 869ce31db51..6dd8a2b17ec 100644 --- a/yt/visualization/volume_rendering/image_handling.py +++ b/yt/visualization/volume_rendering/image_handling.py @@ -15,7 +15,7 @@ def 
export_rgba( if (not h5 and not fits) or (h5 and fits): raise ValueError("Choose either HDF5 or FITS format!") if h5: - f = h5py.File("%s.h5" % fn, mode="w") + f = h5py.File(f"{fn}.h5", mode="w") f.create_dataset("R", data=image[:, :, 0]) f.create_dataset("G", data=image[:, :, 1]) f.create_dataset("B", data=image[:, :, 2]) @@ -30,7 +30,7 @@ def export_rgba( data["b"] = image[:, :, 2] data["a"] = image[:, :, 3] fib = FITSImageData(data) - fib.writeto("%s.fits" % fn, overwrite=True) + fib.writeto(f"{fn}.fits", overwrite=True) def import_rgba(name, h5=True): @@ -95,7 +95,7 @@ def plot_channel( pylab.imshow(image, cmap=mycm, interpolation="nearest") if label is not None: pylab.text(20, 20, label, color=label_color, size=label_size) - pylab.savefig("%s_%s.png" % (name, cmap)) + pylab.savefig(f"{name}_{cmap}.png") pylab.clf() @@ -121,5 +121,5 @@ def plot_rgb(image, name, label=None, label_color="w", label_size="large"): pylab.imshow(image, interpolation="nearest") if label is not None: pylab.text(20, 20, label, color=label_color, size=label_size) - pylab.savefig("%s_rgb.png" % name) + pylab.savefig(f"{name}_rgb.png") pylab.clf() diff --git a/yt/visualization/volume_rendering/input_events.py b/yt/visualization/volume_rendering/input_events.py index 7a733488f9b..c2515751f16 100644 --- a/yt/visualization/volume_rendering/input_events.py +++ b/yt/visualization/volume_rendering/input_events.py @@ -82,7 +82,7 @@ def _add_callback(self, d, func, key, action, mods): if not callable(func): func = event_registry[func] if isinstance(key, str): - key = getattr(glfw, "KEY_%s" % key.upper()) + key = getattr(glfw, f"KEY_{key.upper()}") if isinstance(action, str): action = getattr(glfw, action.upper()) if not isinstance(mods, tuple): @@ -90,7 +90,7 @@ def _add_callback(self, d, func, key, action, mods): mod = 0 for m in mods: if isinstance(m, str): - m = getattr(glfw, "MOD_%s" % m.upper()) + m = getattr(glfw, f"MOD_{m.upper()}") elif m is None: m = 0 mod |= m @@ -248,7 +248,7 @@ def cmap_cycle(event_coll, event): cmap = cm.get_cmap(random.choice(cmap)) event_coll.camera.cmap = np.array(cmap(np.linspace(0, 1, 256)), dtype=np.float32) event_coll.camera.cmap_new = True - print("Setting colormap to {}".format(cmap.name)) + print(f"Setting colormap to {cmap.name}") return True @@ -350,7 +350,7 @@ def print_help(event_coll, event): key_map[glfw.__dict__.get(key)] = key[4:] for cb in (f for f in sorted(event_coll.key_callbacks) if isinstance(f, tuple)): for e in event_coll.key_callbacks[cb]: - print("%s - %s" % (key_map[cb[0]], e.__doc__)) + print(f"{key_map[cb[0]]} - {e.__doc__}") return False diff --git a/yt/visualization/volume_rendering/interactive_vr_helpers.py b/yt/visualization/volume_rendering/interactive_vr_helpers.py index 54485c43a07..d146f35b127 100644 --- a/yt/visualization/volume_rendering/interactive_vr_helpers.py +++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py @@ -66,9 +66,8 @@ def _render_opengl( field = dobj.ds.default_field if field not in dobj.ds.derived_field_list: raise YTSceneFieldNotFound( - """Could not find field '%s' in %s. + f"""Could not find field '{field}' in {dobj.ds}. 
Please specify a field in create_scene()""" - % (field, dobj.ds) ) mylog.info("Setting default field to %s", field.__repr__()) if window_size is None: diff --git a/yt/visualization/volume_rendering/lens.py b/yt/visualization/volume_rendering/lens.py index 6365ce785f9..1cd3a5cf4ac 100644 --- a/yt/visualization/volume_rendering/lens.py +++ b/yt/visualization/volume_rendering/lens.py @@ -293,9 +293,7 @@ def project_to_plane(self, camera, pos, res=None): return px, py, dz def __repr__(self): - disp = ":\n\tlens_type:perspective\n\tviewpoint:%s" % ( - self.viewpoint - ) + disp = f":\n\tlens_type:perspective\n\tviewpoint:{self.viewpoint}" return disp @@ -520,9 +518,7 @@ def set_viewpoint(self, camera): self.viewpoint = self.front_center def __repr__(self): - disp = ":\n\tlens_type:perspective\n\tviewpoint:%s" % ( - self.viewpoint - ) + disp = f":\n\tlens_type:perspective\n\tviewpoint:{self.viewpoint}" return disp diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 97fe84c0271..17be01b0d2d 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -145,7 +145,7 @@ def off_axis_projection( raise_error = False ptype = sph_ptypes[0] - ppos = ["particle_position_%s" % ax for ax in "xyz"] + ppos = [f"particle_position_{ax}" for ax in "xyz"] # Assure that the field we're trying to off-axis project # has a field type as the SPH particle type or if the field is an # alias to an SPH field or is a 'gas' field diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 72de7d0d8d4..c65826f22cd 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1648,7 +1648,7 @@ def save_image(self, image, fn=None, clim=None, label=None): if self.comm.rank == 0 and fn is not None: # This assumes Density; this is a relatively safe assumption. 
if label is None: - label = "Projected %s" % (self.fields[0]) + label = f"Projected {self.fields[0]}" if clim is not None: cmin, cmax = clim else: diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 62915fde571..898399b3988 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -414,7 +414,7 @@ def set_sampler(self, camera, interpolated=True): elif self.sampler_type == "projection": sampler = new_projection_sampler(camera, self) else: - NotImplementedError("%s not implemented yet" % self.sampler_type) + NotImplementedError(f"{self.sampler_type} not implemented yet") self.sampler = sampler assert self.sampler is not None @@ -484,8 +484,8 @@ def finalize_image(self, camera, image): return image def __repr__(self): - disp = ":%s " % str(self.data_source) - disp += "transfer_function:%s" % str(self._transfer_function) + disp = f":{str(self.data_source)} " + disp += f"transfer_function:{str(self._transfer_function)}" return disp @@ -794,7 +794,7 @@ def apply_colormap(self): return image def __repr__(self): - disp = ":%s " % str(self.data_source) + disp = f":{str(self.data_source)} " return disp diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index b6b4aa95379..06d7e132151 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -311,14 +311,14 @@ def save(self, fname=None, sigma_clip=None, render=True): field = rs.field else: field = rs.field[-1] - fname = "%s_Render_%s.png" % (basename, field) + fname = f"{basename}_Render_{field}.png" # if no volume source present, use a default filename else: fname = "Render_opaque.png" suffix = get_image_suffix(fname) if suffix == "": suffix = ".png" - fname = "%s%s" % (fname, suffix) + fname = f"{fname}{suffix}" render = self._sanitize_render(render) if render: @@ -341,7 +341,7 @@ def save(self, fname=None, sigma_clip=None, render=True): elif suffix in (".eps", ".ps"): canvas = FigureCanvasPS(fig) else: - raise NotImplementedError("Unknown file suffix '{}'".format(suffix)) + raise NotImplementedError(f"Unknown file suffix '{suffix}'") ax = fig.add_axes([0, 0, 1, 1]) ax.set_axis_off() out = self._last_render @@ -442,14 +442,14 @@ def save_annotated( field = rs.field else: field = rs.field[-1] - fname = "%s_Render_%s.png" % (basename, field) + fname = f"{basename}_Render_{field}.png" # if no volume source present, use a default filename else: fname = "Render_opaque.png" suffix = get_image_suffix(fname) if suffix == "": suffix = ".png" - fname = "%s%s" % (fname, suffix) + fname = f"{fname}{suffix}" render = self._sanitize_render(render) if render: @@ -987,7 +987,7 @@ def __repr__(self): disp = ":" disp += "\nSources: \n" for k, v in self.sources.items(): - disp += " %s: %s\n" % (k, v) + disp += f" {k}: {v}\n" disp += "Camera: \n" - disp += " %s" % self.camera + disp += f" {self.camera}" return disp diff --git a/yt/visualization/volume_rendering/shader_objects.py b/yt/visualization/volume_rendering/shader_objects.py index eba6c4a4adb..901bb6a42d3 100644 --- a/yt/visualization/volume_rendering/shader_objects.py +++ b/yt/visualization/volume_rendering/shader_objects.py @@ -93,7 +93,7 @@ def _guess_uniform_func(self, value): return func def _set_scalar_uniform(self, kind, size_spec): - gl_func = getattr(GL, "glUniform%s%sv" % (size_spec, kind)) + gl_func = getattr(GL, f"glUniform{size_spec}{kind}v") def _func(location, 
value): return gl_func(location, 1, value) @@ -102,7 +102,7 @@ def _func(location, value): def _set_matrix_uniform(self, kind, size_spec): assert size_spec[0] == size_spec[1] - gl_func = getattr(GL, "glUniformMatrix%s%sv" % (size_spec[0], kind)) + gl_func = getattr(GL, f"glUniformMatrix{size_spec[0]}{kind}v") def _func(location, value): return gl_func(location, 1, GL.GL_TRUE, value) @@ -183,7 +183,7 @@ def compile(self, source=None, parameters=None): if parameters is not None: raise NotImplementedError source = self._get_source(source) - shader_type_enum = getattr(GL, "GL_%s_SHADER" % self.shader_type.upper()) + shader_type_enum = getattr(GL, f"GL_{self.shader_type.upper()}_SHADER") shader = GL.glCreateShader(shader_type_enum) # We could do templating here if we wanted. self.shader_source = source diff --git a/yt/visualization/volume_rendering/tests/test_camera_attributes.py b/yt/visualization/volume_rendering/tests/test_camera_attributes.py index 96cc2eb7c48..856bbd5eb2f 100644 --- a/yt/visualization/volume_rendering/tests/test_camera_attributes.py +++ b/yt/visualization/volume_rendering/tests/test_camera_attributes.py @@ -63,10 +63,8 @@ def test_scene_and_camera_attributes(): try: # test setters/getters - getattr(cam, "set_%s" % attribute)(attribute_value) - assert_almost_equal( - getattr(cam, "get_%s" % attribute)(), expected_result - ) + getattr(cam, f"set_{attribute}")(attribute_value) + assert_almost_equal(getattr(cam, f"get_{attribute}")(), expected_result) except RuntimeError: assert expected_result is RuntimeError diff --git a/yt/visualization/volume_rendering/tests/test_lenses.py b/yt/visualization/volume_rendering/tests/test_lenses.py index 531eceda710..e8f3a95d209 100644 --- a/yt/visualization/volume_rendering/tests/test_lenses.py +++ b/yt/visualization/volume_rendering/tests/test_lenses.py @@ -46,7 +46,7 @@ def test_perspective_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.save("test_perspective_%s.png" % self.field[1], sigma_clip=6.0) + sc.save(f"test_perspective_{self.field[1]}.png", sigma_clip=6.0) def test_stereoperspective_lens(self): sc = Scene() @@ -57,7 +57,7 @@ def test_stereoperspective_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.save("test_stereoperspective_%s.png" % self.field[1], sigma_clip=6.0) + sc.save(f"test_stereoperspective_{self.field[1]}.png", sigma_clip=6.0) def test_fisheye_lens(self): dd = self.ds.sphere(self.ds.domain_center, self.ds.domain_width[0] / 10) @@ -71,7 +71,7 @@ def test_fisheye_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.save("test_fisheye_%s.png" % self.field[1], sigma_clip=6.0) + sc.save(f"test_fisheye_{self.field[1]}.png", sigma_clip=6.0) def test_plane_lens(self): dd = self.ds.sphere(self.ds.domain_center, self.ds.domain_width[0] / 10) @@ -83,7 +83,7 @@ def test_plane_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.save("test_plane_%s.png" % self.field[1], sigma_clip=6.0) + sc.save(f"test_plane_{self.field[1]}.png", sigma_clip=6.0) def test_spherical_lens(self): sc = Scene() @@ -94,7 +94,7 @@ def test_spherical_lens(self): tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.save("test_spherical_%s.png" % self.field[1], sigma_clip=6.0) + sc.save(f"test_spherical_{self.field[1]}.png", sigma_clip=6.0) def test_stereospherical_lens(self): w = (self.ds.domain_width).in_units("code_length") @@ -107,4 +107,4 @@ def test_stereospherical_lens(self): tf = 
vol.transfer_function tf.grey_opacity = True sc.add_source(vol) - sc.save("test_stereospherical_%s.png" % self.field[1], sigma_clip=6.0) + sc.save(f"test_stereospherical_{self.field[1]}.png", sigma_clip=6.0) diff --git a/yt/visualization/volume_rendering/tests/test_mesh_render.py b/yt/visualization/volume_rendering/tests/test_mesh_render.py index a4cdf9886c8..a6f2f2fe650 100644 --- a/yt/visualization/volume_rendering/tests/test_mesh_render.py +++ b/yt/visualization/volume_rendering/tests/test_mesh_render.py @@ -19,7 +19,7 @@ def compare(ds, im, test_prefix, test_name=None, decimals=12): def mesh_render_image_func(filename_prefix): return im.write_image(filename_prefix) - mesh_render_image_func.__name__ = "func_{}".format(test_prefix) + mesh_render_image_func.__name__ = f"func_{test_prefix}" test = GenericImageTest(ds, mesh_render_image_func, decimals) test.prefix = test_prefix test.answer_name = test_name @@ -70,7 +70,7 @@ def test_fake_hexahedral_ds_render(): for field in field_list: sc = create_scene(ds, field) im = sc.render() - test_prefix = "yt_render_fake_hexahedral_%s_%s" % (field[0], field[1]) + test_prefix = f"yt_render_fake_hexahedral_{field[0]}_{field[1]}" yield compare( ds, im, test_prefix=test_prefix, test_name="fake_hexahedral_ds_render" ) @@ -85,9 +85,7 @@ def hex8_render(engine, field): ds = data_dir_load(hex8, kwargs={"step": -1}) sc = create_scene(ds, field) im = sc.render() - return compare( - ds, im, "%s_render_answers_hex8_%s_%s" % (engine, field[0], field[1]) - ) + return compare(ds, im, f"{engine}_render_answers_hex8_{field[0]}_{field[1]}") @requires_ds(hex8) @@ -112,9 +110,7 @@ def tet4_render(engine, field): ds = data_dir_load(tet4, kwargs={"step": -1}) sc = create_scene(ds, field) im = sc.render() - return compare( - ds, im, "%s_render_answers_tet4_%s_%s" % (engine, field[0], field[1]) - ) + return compare(ds, im, f"{engine}_render_answers_tet4_{field[0]}_{field[1]}") @requires_ds(tet4) @@ -139,9 +135,7 @@ def hex20_render(engine, field): ds = data_dir_load(hex20, kwargs={"step": -1}) sc = create_scene(ds, field) im = sc.render() - return compare( - ds, im, "%s_render_answers_hex20_%s_%s" % (engine, field[0], field[1]) - ) + return compare(ds, im, f"{engine}_render_answers_hex20_{field[0]}_{field[1]}") @requires_ds(hex20) @@ -166,9 +160,7 @@ def wedge6_render(engine, field): ds = data_dir_load(wedge6, kwargs={"step": -1}) sc = create_scene(ds, field) im = sc.render() - return compare( - ds, im, "%s_render_answers_wedge6_%s_%s" % (engine, field[0], field[1]) - ) + return compare(ds, im, f"{engine}_render_answers_wedge6_{field[0]}_{field[1]}") @requires_ds(wedge6) @@ -195,9 +187,7 @@ def tet10_render(engine, field): ms = sc.get_source(0) ms.color_bounds = (-0.01, 0.2) im = sc.render() - return compare( - ds, im, "%s_render_answers_tet10_%s_%s" % (engine, field[0], field[1]) - ) + return compare(ds, im, f"{engine}_render_answers_tet10_{field[0]}_{field[1]}") @requires_ds(tet10) @@ -224,7 +214,7 @@ def perspective_mesh_render(engine): cam.set_position(cam_pos, north_vector) cam.resolution = (800, 800) im = sc.render() - return compare(ds, im, "%s_perspective_mesh_render" % engine) + return compare(ds, im, f"{engine}_perspective_mesh_render") @requires_ds(hex8) @@ -255,7 +245,7 @@ def composite_mesh_render(engine): sc.add_source(ms1) sc.add_source(ms2) im = sc.render() - return compare(ds, im, "%s_composite_mesh_render" % engine) + return compare(ds, im, f"{engine}_composite_mesh_render") @requires_ds(hex8) diff --git 
a/yt/visualization/volume_rendering/tests/test_scene.py b/yt/visualization/volume_rendering/tests/test_scene.py index efb2c2bed2c..7ed4c9d0820 100644 --- a/yt/visualization/volume_rendering/tests/test_scene.py +++ b/yt/visualization/volume_rendering/tests/test_scene.py @@ -71,7 +71,7 @@ def test_rotation(self): tf.map_to_colormap(mi_bound, ma_bound, scale=0.01, colormap="Reds_r") sc.render() for suffix in ["png", "eps", "ps", "pdf"]: - fname = "test_scene.{}".format(suffix) + fname = f"test_scene.{suffix}" sc.save(fname, sigma_clip=6.0) assert_fname(fname) diff --git a/yt/visualization/volume_rendering/tests/test_vr_orientation.py b/yt/visualization/volume_rendering/tests/test_vr_orientation.py index f9973efbc28..dd6a69a6723 100644 --- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py +++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py @@ -93,6 +93,6 @@ def offaxis_image_func(filename_prefix): return image.write_image(filename_prefix) test5 = GenericImageTest(ds, offaxis_image_func, decimals) - test5.prefix = "oap_orientation_{}".format(i) + test5.prefix = f"oap_orientation_{i}" test5.answer_name = test_name yield test5 diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index 359b7df2f80..af14b24b564 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -669,7 +669,7 @@ def x_format(x, pos): else: return r"$0$" else: - return "%.1g" % (val) + return f"{val:.1g}" else: return label_fmt % (val) @@ -901,7 +901,7 @@ def __repr__(self): % (self.x_bounds[0], self.x_bounds[1], self.nbins) ) for f in self.features: - disp += "\t%s\n" % str(f) + disp += f"\t{str(f)}\n" return disp diff --git a/yt/visualization/volume_rendering/volume_rendering.py b/yt/visualization/volume_rendering/volume_rendering.py index 0324077a5db..f583942b5e5 100644 --- a/yt/visualization/volume_rendering/volume_rendering.py +++ b/yt/visualization/volume_rendering/volume_rendering.py @@ -55,9 +55,8 @@ def create_scene(data_source, field=None, lens_type="plane-parallel"): field = data_source.ds.default_field if field not in data_source.ds.derived_field_list: raise YTSceneFieldNotFound( - """Could not find field '%s' in %s. + f"""Could not find field '{field}' in {data_source.ds}. 
Please specify a field in create_scene()""" - % (field, data_source.ds) ) mylog.info("Setting default field to %s", field.__repr__()) From a4a881534c060b03241d5e2bb4f3f9a64ca23733 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 7 Aug 2020 14:06:16 +0200 Subject: [PATCH 331/653] Add last commit to git blame ignore revs --- .git-blame-ignore-revs | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs index b174b700f8b..44f97f53fa3 100644 --- a/.git-blame-ignore-revs +++ b/.git-blame-ignore-revs @@ -11,4 +11,5 @@ ebadee629414aed2c7b6526e22a419205329ec38 # converting to f-strings ad898e8e3954bc348daaa449d5ed73db778785e9 ef51ad5199692afcf1a8ab491aa115c00c423113 -323ac4ddd4e99d6b951666736d4e9b03b6cfa21e \ No newline at end of file +323ac4ddd4e99d6b951666736d4e9b03b6cfa21e +f7445f02022293f1b089cd8907000301516354bf From 5b21fba3595b37cf27cf3409e775db8c7af0d174 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 7 Aug 2020 14:18:40 +0200 Subject: [PATCH 332/653] Adding pre-commit --- .pre-commit-config.yaml | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index acdc055eefe..d98662753c7 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,3 +11,10 @@ rev: '3.8.1' # keep in sync with tests/lint_requirements.txt hooks: - id: flake8 +- repo: local + hooks: + - id: flynt + name: flynt + entry: flynt + language: system + types: [python] From b928099d0f42ebd01a912f7cefe9f8ec6ca49ac0 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 7 Aug 2020 14:21:54 +0200 Subject: [PATCH 333/653] Linting with flynt --- .travis.yml | 5 +++++ tests/lint_requirements.txt | 1 + 2 files changed, 6 insertions(+) diff --git a/.travis.yml b/.travis.yml index 9c3b7d7f570..9c5ea9d5291 100644 --- a/.travis.yml +++ b/.travis.yml @@ -93,6 +93,11 @@ jobs: python: 3.6 script: black --check yt/ + - stage: Lint + name: "flynt" + python: 3.6 + script: flynt --fail-on-change -n + - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" python: 3.6 diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index aaf97225adb..0db50b0c78f 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -5,3 +5,4 @@ pyflakes==2.2.0 isort==5.2.1 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear +-e git+https://github.com/cphyc/flynt.git@dry-run-mode#egg=flynt From bcc58f0f8825e2d95b295862aca18b93e941740c Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 7 Aug 2020 15:00:02 +0200 Subject: [PATCH 334/653] forgot argument --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 9c5ea9d5291..615fd540563 100644 --- a/.travis.yml +++ b/.travis.yml @@ -96,7 +96,7 @@ jobs: - stage: Lint name: "flynt" python: 3.6 - script: flynt --fail-on-change -n + script: flynt --fail-on-change -n yt/ - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" From 4ca5886042c548c6a67310eb9e5b87cf5d778e75 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 7 Aug 2020 15:29:31 +0200 Subject: [PATCH 335/653] Explicit is better than implicit MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 615fd540563..1b973ca6738 100644 --- a/.travis.yml +++ b/.travis.yml @@ -96,7 +96,7 @@ jobs: - stage: Lint 
name: "flynt" python: 3.6 - script: flynt --fail-on-change -n yt/ + script: flynt --fail-on-change --dry-run yt/ - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" From 59b13c5a02267973c02f5430361c17cc07356ce9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 7 Aug 2020 15:32:39 +0200 Subject: [PATCH 336/653] Mention in CONTRIBUTING.rst --- CONTRIBUTING.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 3d540fccb7b..08c88bee8ab 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -52,10 +52,10 @@ of a repository; in this case the fork will live in the space under your username on github, rather than the ``yt-project``. If you have never made a fork of a repository on github, or are unfamiliar with this process, here is a short article about how to do so: -https://help.github.com/en/github/getting-started-with-github/fork-a-repo . +https://help.github.com/en/github/getting-started-with-github/fork-a-repo . The documentation for ``yt`` lives in the ``doc`` directory in the root of the yt git -repository. To make a contribution to the yt documentation you will +repository. To make a contribution to the yt documentation you will make your changes in your own fork of ``yt``. When you are done, issue a pull request through the website for your new fork, and we can comment back and forth and eventually accept your changes. See :ref:`sharing-changes` for @@ -87,7 +87,7 @@ usually we end up accepting. For more information, see :ref:`contributing-code`, where we spell out how to get up and running with a development environment, how to commit, and how to -use GitHub. When you're ready to share your changes with the community, refer to +use GitHub. When you're ready to share your changes with the community, refer to :ref:`sharing-changes` to see how to contribute them back upstream. Online Presence @@ -337,12 +337,12 @@ revision specifier will not show more recent changes to the repository. An alternative option is to use ``checkout`` on a branch. In yt the ``master`` branch is our primary development branch, so checking out ``master`` should return you to the tip (or most up-to-date revision specifier) on the ``master`` -branch. +branch. .. code-block:: bash - + $ git checkout master - + Lastly, if you want to use this new downloaded version of your yt repository as the *active* version of yt on your computer (i.e. the one which is executed when you run yt from the command line or the one that is loaded when you do ``import @@ -723,8 +723,8 @@ Below are a list of rules for coding style in yt. Some of these rules are suggestions are not explicitly enforced, while some are enforced via automated testing. -The yt project uses ``flake8`` to report on code correctness (syntax + anti-pattern -detection), and ``black`` for automated formatting. +The yt project uses ``flake8`` and ``flynt`` to report on code correctness (syntax + +anti-pattern detection), and ``black`` for automated formatting. To check the coding style of your contributions locally you will need to install those tools, which can be done for instance with ``pip``: @@ -738,6 +738,7 @@ Then run the checks from the top level of the repository with .. 
code-block:: bash

     $ flake8 yt/
+    $ flynt --fail-on-change --dry-run yt/
     $ black --check

 These will respectively print out any ``flake8`` errors or warnings that your newly added

From 535134f266e56bce75b6f6a366e2a9d256320e8c Mon Sep 17 00:00:00 2001
From: Corentin Cadiou
Date: Fri, 7 Aug 2020 15:48:35 +0200
Subject: [PATCH 337/653] Improve CONTRIBUTING

---
 CONTRIBUTING.rst | 5 ++++-
 1 file changed, 4 insertions(+), 1 deletion(-)

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 08c88bee8ab..2c1c19e3bbb 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -782,7 +782,7 @@ If you wish to automate this process you may be interested in using `pre-commit

   $ pre-commit install

-So that ``black``, ``flake8`` and ``isort`` will run and update your changes every time
+So that ``black``, ``flynt``, ``flake8`` and ``isort`` will run and update your changes every time
 you commit new code. This setup is not required so you have the option of checking
 for code style only in the late stage of a branch when we need to validate it for
 merging.
@@ -823,6 +823,9 @@ Source code style guide
    return value of a function, do not store it in a variable.
  * Add tests for new functionality. When fixing a bug, consider adding a test to
    prevent the bug from recurring.
+ * Use f-strings where possible (https://www.python.org/dev/peps/pep-0498/), except
+   in logging functions where the recommended syntax is
+   ``mylog.info("Something %s", "value")``.

 API Style Guide
 ---------------

From 8ddd519b2c347e2dcd63a0b0e7886cd7ffad4af9 Mon Sep 17 00:00:00 2001
From: Corentin Cadiou
Date: Fri, 7 Aug 2020 15:57:46 +0200
Subject: [PATCH 338/653] Reword CONTRIBUTING.rst
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

Co-authored-by: Clément Robert
---
 CONTRIBUTING.rst | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst
index 2c1c19e3bbb..fcec4870164 100644
--- a/CONTRIBUTING.rst
+++ b/CONTRIBUTING.rst
@@ -823,7 +823,7 @@ Source code style guide
    return value of a function, do not store it in a variable.
  * Add tests for new functionality. When fixing a bug, consider adding a test to
    prevent the bug from recurring.
- * Use f-strings where possible (https://www.python.org/dev/peps/pep-0498/), except
+ * Use f-strings for string-formatting (https://www.python.org/dev/peps/pep-0498/), except
   in logging functions where the recommended syntax is
   ``mylog.info("Something %s", "value")``.
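A minimal sketch of the style-guide rule added above, assuming yt's usual logger import
(``from yt.utilities.logger import ytLogger as mylog``, which appears elsewhere in this
series); the ``describe_field`` helper is purely hypothetical and only for illustration:

.. code-block:: python

    # f-strings for ordinary string building; %-style arguments for logging calls,
    # so the logger only interpolates the message if the record is actually emitted.
    from yt.utilities.logger import ytLogger as mylog


    def describe_field(field, value):
        # hypothetical helper, not part of yt's API
        label = f"{field} = {value:.3g}"  # eager f-string formatting
        mylog.info("Setting %s to %s", field, value)  # lazy logging formatting
        return label

Passing the values as separate arguments lets the logger skip string interpolation
entirely when the message is filtered out, which is why the guideline treats logging
calls differently from ordinary string formatting.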
From 8acbf01dc14d217737e9727e6064e14ddeaf70f4 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 7 Aug 2020 13:36:13 -0500 Subject: [PATCH 339/653] Extract and rename GenerationInProgress --- yt/data_objects/data_containers.py | 11 +++-------- yt/frontends/ytdata/data_structures.py | 5 ++--- yt/utilities/exceptions.py | 6 ++++++ 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index ff661ae035f..4ecc6a8d5c4 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -38,6 +38,7 @@ YTFieldTypeNotFound, YTFieldUnitError, YTFieldUnitParseError, + YTGenerationInProgress, YTNonIndexedDataContainer, YTSpatialFieldUnitError, ) @@ -1527,12 +1528,6 @@ def blocks(self): o.field_parameters = cache_fp -class GenerationInProgress(Exception): - def __init__(self, fields): - self.fields = fields - super(GenerationInProgress, self).__init__() - - class YTSelectionContainer(YTDataContainer, ParallelAnalysisInterface): _locked = False _sort_by = None @@ -1675,7 +1670,7 @@ def get_data(self, fields=None): if len(fields_to_get) == 0 and len(fields_to_generate) == 0: return elif self._locked: - raise GenerationInProgress(fields) + raise YTGenerationInProgress(fields) # Track which ones we want in the end ofields = set(list(self.field_data.keys()) + fields_to_get + fields_to_generate) # At this point, we want to figure out *all* our dependencies. @@ -1771,7 +1766,7 @@ def _generate_fields(self, fields_to_generate): except UnitParseError: raise YTFieldUnitParseError(fi) self.field_data[field] = fd - except GenerationInProgress as gip: + except YTGenerationInProgress as gip: for f in gip.fields: if f not in fields_to_generate: fields_to_generate.append(f) diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index 57bd7c1673a..a75f87ef19c 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -5,7 +5,6 @@ import numpy as np -from yt.data_objects.data_containers import GenerationInProgress from yt.data_objects.grid_patch import AMRGridPatch from yt.data_objects.particle_unions import ParticleUnion from yt.data_objects.profiles import ( @@ -21,7 +20,7 @@ from yt.units import dimensions from yt.units.unit_registry import UnitRegistry from yt.units.yt_array import YTQuantity, uconcatenate -from yt.utilities.exceptions import YTFieldTypeNotFound +from yt.utilities.exceptions import YTFieldTypeNotFound, YTGenerationInProgress from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5py from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only @@ -625,7 +624,7 @@ def get_data(self, fields=None): if len(fields_to_get) == 0 and len(fields_to_generate) == 0: return elif self._locked: - raise GenerationInProgress(fields) + raise YTGenerationInProgress(fields) # Track which ones we want in the end ofields = set(list(self.field_data.keys()) + fields_to_get + fields_to_generate) # At this point, we want to figure out *all* our dependencies. diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 1f1972f3d87..f10a615bb37 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -872,3 +872,9 @@ def __str__(self): msg += "We do not support displaying arrays larger\n" msg += "than size %s." 
% self.max_size return msg + + +class YTGenerationInProgress(Exception): + def __init__(self, fields): + self.fields = fields + super(YTGenerationInProgress, self).__init__() From 330aa663e239a978b33862d190634ddd739a8f95 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 7 Aug 2020 20:27:55 +0200 Subject: [PATCH 340/653] add three labels to mergeable list ('proposal', 'dead code' and 'refactor') --- .github/mergeable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/mergeable.yml b/.github/mergeable.yml index 546019e5c9e..f22ba50ba63 100644 --- a/.github/mergeable.yml +++ b/.github/mergeable.yml @@ -9,5 +9,5 @@ mergeable: message: "WIP pull requests can't be merged." - do: label must_include: - regex: 'bug|enhancement|new feature|docs|infrastructure' + regex: 'bug|enhancement|new feature|docs|infrastructure|dead code|refactor|proposal' message: "Please label this pull request with one of: bug, enhancement, new feature, docs or infrastructure." From 653c19b3eb7680d45cbec593ad9ad342f41903fc Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 7 Aug 2020 14:13:02 -0500 Subject: [PATCH 341/653] Move data objects to subdirectory --- yt/data_objects/api.py | 2 +- .../construction_data_containers.py | 4 +- yt/data_objects/data_containers.py | 1462 +---------------- yt/data_objects/grid_patch.py | 2 +- yt/data_objects/octree_subset.py | 2 +- yt/data_objects/particle_container.py | 3 +- yt/data_objects/selection_data_containers.py | 1332 --------------- yt/data_objects/selection_objects/__init__.py | 13 + .../selection_objects/base_objects.py | 1411 ++++++++++++++++ .../selection_objects/boolean_operations.py | 149 ++ .../selection_objects/cut_region.py | 247 +++ yt/data_objects/selection_objects/disk.py | 95 ++ .../selection_objects/object_collection.py | 31 + yt/data_objects/selection_objects/point.py | 52 + yt/data_objects/selection_objects/ray.py | 227 +++ yt/data_objects/selection_objects/region.py | 71 + yt/data_objects/selection_objects/slices.py | 366 +++++ .../selection_objects/spheroids.py | 231 +++ yt/data_objects/unstructured_mesh.py | 2 +- yt/frontends/adaptahop/data_structures.py | 2 +- yt/frontends/gadget_fof/data_structures.py | 2 +- .../coordinates/cartesian_coordinates.py | 3 +- yt/utilities/answer_testing/utils.py | 2 +- yt/utilities/particle_generator.py | 2 +- yt/visualization/plot_modifications.py | 2 +- yt/visualization/plot_window.py | 2 +- yt/visualization/profile_plotter.py | 2 +- yt/visualization/volume_rendering/utils.py | 2 +- 28 files changed, 2912 insertions(+), 2809 deletions(-) delete mode 100644 yt/data_objects/selection_data_containers.py create mode 100644 yt/data_objects/selection_objects/__init__.py create mode 100644 yt/data_objects/selection_objects/base_objects.py create mode 100644 yt/data_objects/selection_objects/boolean_operations.py create mode 100644 yt/data_objects/selection_objects/cut_region.py create mode 100644 yt/data_objects/selection_objects/disk.py create mode 100644 yt/data_objects/selection_objects/object_collection.py create mode 100644 yt/data_objects/selection_objects/point.py create mode 100644 yt/data_objects/selection_objects/ray.py create mode 100644 yt/data_objects/selection_objects/region.py create mode 100644 yt/data_objects/selection_objects/slices.py create mode 100644 yt/data_objects/selection_objects/spheroids.py diff --git a/yt/data_objects/api.py b/yt/data_objects/api.py index f0f94d5ad01..859d4a23e8e 100644 --- a/yt/data_objects/api.py +++ b/yt/data_objects/api.py @@ 
-1,4 +1,4 @@ -from . import construction_data_containers as __cdc, selection_data_containers as __sdc +from . import construction_data_containers as __cdc, selection_objects as __sdc from .analyzer_objects import AnalysisTask, analysis_task from .grid_patch import AMRGridPatch from .image_array import ImageArray diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index e09e3aff35c..0719adaa8fd 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -9,12 +9,12 @@ import numpy as np from yt.config import ytcfg -from yt.data_objects.data_containers import ( +from yt.data_objects.field_data import YTFieldData +from yt.data_objects.selection_objects.base_objects import ( YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D, ) -from yt.data_objects.field_data import YTFieldData from yt.extern.tqdm import tqdm from yt.fields.field_exceptions import NeedsGridType, NeedsOriginalGrid from yt.frontends.sph.data_structures import ParticleDataset diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 4ecc6a8d5c4..0ad5539ded9 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -1,56 +1,29 @@ -import itertools import os import shelve -import uuid import weakref -from collections import defaultdict from contextlib import contextmanager import numpy as np -from unyt.exceptions import UnitConversionError, UnitParseError -import yt.geometry.selection_routines from yt.data_objects.field_data import YTFieldData from yt.data_objects.profiles import create_profile from yt.fields.field_exceptions import NeedsGridType from yt.frontends.ytdata.utilities import save_as_dataset -from yt.funcs import ( - ensure_list, - fix_axis, - get_output_filename, - iterable, - mylog, - validate_width_tuple, -) -from yt.geometry.selection_routines import compose_selector -from yt.units import dimensions as ytdims +from yt.funcs import ensure_list, get_output_filename, iterable, mylog from yt.units.yt_array import YTArray, YTQuantity, uconcatenate from yt.utilities.amr_kdtree.api import AMRKDTree from yt.utilities.exceptions import ( - YTBooleanObjectError, - YTBooleanObjectsWrongDataset, YTCouldNotGenerateField, - YTDataSelectorNotImplemented, - YTDimensionalityError, YTException, YTFieldNotFound, YTFieldNotParseable, YTFieldTypeNotFound, - YTFieldUnitError, - YTFieldUnitParseError, - YTGenerationInProgress, YTNonIndexedDataContainer, YTSpatialFieldUnitError, ) -from yt.utilities.lib.marching_cubes import march_cubes_grid, march_cubes_grid_flux from yt.utilities.object_registries import data_object_registry -from yt.utilities.parallel_tools.parallel_analysis_interface import ( - ParallelAnalysisInterface, -) from yt.utilities.parameter_file_storage import ParameterFileStore -from .derived_quantities import DerivedQuantityCollection - def sanitize_weight_field(ds, field, weight): field_object = ds._get_field_info(field) @@ -1528,1439 +1501,6 @@ def blocks(self): o.field_parameters = cache_fp -class YTSelectionContainer(YTDataContainer, ParallelAnalysisInterface): - _locked = False - _sort_by = None - _selector = None - _current_chunk = None - _data_source = None - _dimensionality = None - _max_level = None - _min_level = None - - def __init__(self, ds, field_parameters, data_source=None): - ParallelAnalysisInterface.__init__(self) - super(YTSelectionContainer, self).__init__(ds, field_parameters) - self._data_source = 
data_source - if data_source is not None: - if data_source.ds != self.ds: - raise RuntimeError( - "Attempted to construct a DataContainer with a data_source " - "from a different Dataset", - ds, - data_source.ds, - ) - if data_source._dimensionality < self._dimensionality: - raise RuntimeError( - "Attempted to construct a DataContainer with a data_source " - "of lower dimensionality (%u vs %u)" - % (data_source._dimensionality, self._dimensionality) - ) - self.field_parameters.update(data_source.field_parameters) - self.quantities = DerivedQuantityCollection(self) - - @property - def selector(self): - if self._selector is not None: - return self._selector - s_module = getattr(self, "_selector_module", yt.geometry.selection_routines) - sclass = getattr(s_module, "%s_selector" % self._type_name, None) - if sclass is None: - raise YTDataSelectorNotImplemented(self._type_name) - - if self._data_source is not None: - self._selector = compose_selector( - self, self._data_source.selector, sclass(self) - ) - else: - self._selector = sclass(self) - return self._selector - - def chunks(self, fields, chunking_style, **kwargs): - # This is an iterator that will yield the necessary chunks. - self.get_data() # Ensure we have built ourselves - if fields is None: - fields = [] - # chunk_ind can be supplied in the keyword arguments. If it's a - # scalar, that'll be the only chunk that gets returned; if it's a list, - # those are the ones that will be. - chunk_ind = kwargs.pop("chunk_ind", None) - if chunk_ind is not None: - chunk_ind = ensure_list(chunk_ind) - for ci, chunk in enumerate(self.index._chunk(self, chunking_style, **kwargs)): - if chunk_ind is not None and ci not in chunk_ind: - continue - with self._chunked_read(chunk): - self.get_data(fields) - # NOTE: we yield before releasing the context - yield self - - def _identify_dependencies(self, fields_to_get, spatial=False): - inspected = 0 - fields_to_get = fields_to_get[:] - for field in itertools.cycle(fields_to_get): - if inspected >= len(fields_to_get): - break - inspected += 1 - fi = self.ds._get_field_info(*field) - fd = self.ds.field_dependencies.get( - field, None - ) or self.ds.field_dependencies.get(field[1], None) - # This is long overdue. Any time we *can't* find a field - # dependency -- for instance, if the derived field has been added - # after dataset instantiation -- let's just try to - # recalculate it. 
- if fd is None: - try: - fd = fi.get_dependencies(ds=self.ds) - self.ds.field_dependencies[field] = fd - except Exception: - continue - requested = self._determine_fields(list(set(fd.requested))) - deps = [d for d in requested if d not in fields_to_get] - fields_to_get += deps - return sorted(fields_to_get) - - def get_data(self, fields=None): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - if fields is None: - return - nfields = [] - apply_fields = defaultdict(list) - for field in self._determine_fields(fields): - # We need to create the field on the raw particle types - # for particles types (when the field is not directly - # defined for the derived particle type only) - finfo = self.ds.field_info[field] - - if ( - field[0] in self.ds.filtered_particle_types - and finfo._inherited_particle_filter - ): - f = self.ds.known_filters[field[0]] - apply_fields[field[0]].append((f.filtered_type, field[1])) - else: - nfields.append(field) - for filter_type in apply_fields: - f = self.ds.known_filters[filter_type] - with f.apply(self): - self.get_data(apply_fields[filter_type]) - fields = nfields - if len(fields) == 0: - return - # Now we collect all our fields - # Here is where we need to perform a validation step, so that if we - # have a field requested that we actually *can't* yet get, we put it - # off until the end. This prevents double-reading fields that will - # need to be used in spatial fields later on. - fields_to_get = [] - # This will be pre-populated with spatial fields - fields_to_generate = [] - for field in self._determine_fields(fields): - if field in self.field_data: - continue - finfo = self.ds._get_field_info(*field) - try: - finfo.check_available(self) - except NeedsGridType: - fields_to_generate.append(field) - continue - fields_to_get.append(field) - if len(fields_to_get) == 0 and len(fields_to_generate) == 0: - return - elif self._locked: - raise YTGenerationInProgress(fields) - # Track which ones we want in the end - ofields = set(list(self.field_data.keys()) + fields_to_get + fields_to_generate) - # At this point, we want to figure out *all* our dependencies. - fields_to_get = self._identify_dependencies(fields_to_get, self._spatial) - # We now split up into readers for the types of fields - fluids, particles = [], [] - finfos = {} - for ftype, fname in fields_to_get: - finfo = self.ds._get_field_info(ftype, fname) - finfos[ftype, fname] = finfo - if finfo.sampling_type == "particle": - particles.append((ftype, fname)) - elif (ftype, fname) not in fluids: - fluids.append((ftype, fname)) - # The _read method will figure out which fields it needs to get from - # disk, and return a dict of those fields along with the fields that - # need to be generated. 
- read_fluids, gen_fluids = self.index._read_fluid_fields( - fluids, self, self._current_chunk - ) - for f, v in read_fluids.items(): - self.field_data[f] = self.ds.arr(v, units=finfos[f].units) - self.field_data[f].convert_to_units(finfos[f].output_units) - - read_particles, gen_particles = self.index._read_particle_fields( - particles, self, self._current_chunk - ) - - for f, v in read_particles.items(): - self.field_data[f] = self.ds.arr(v, units=finfos[f].units) - self.field_data[f].convert_to_units(finfos[f].output_units) - - fields_to_generate += gen_fluids + gen_particles - self._generate_fields(fields_to_generate) - for field in list(self.field_data.keys()): - if field not in ofields: - self.field_data.pop(field) - - def _generate_fields(self, fields_to_generate): - index = 0 - with self._field_lock(): - # At this point, we assume that any fields that are necessary to - # *generate* a field are in fact already available to us. Note - # that we do not make any assumption about whether or not the - # fields have a spatial requirement. This will be checked inside - # _generate_field, at which point additional dependencies may - # actually be noted. - while any(f not in self.field_data for f in fields_to_generate): - field = fields_to_generate[index % len(fields_to_generate)] - index += 1 - if field in self.field_data: - continue - fi = self.ds._get_field_info(*field) - try: - fd = self._generate_field(field) - if hasattr(fd, "units"): - fd.units.registry = self.ds.unit_registry - if fd is None: - raise RuntimeError - if fi.units is None: - # first time calling a field with units='auto', so we - # infer the units from the units of the data we get back - # from the field function and use these units for future - # field accesses - units = getattr(fd, "units", "") - if units == "": - dimensions = ytdims.dimensionless - else: - dimensions = units.dimensions - units = str( - units.get_base_equivalent(self.ds.unit_system.name) - ) - if fi.dimensions != dimensions: - raise YTDimensionalityError(fi.dimensions, dimensions) - fi.units = units - self.field_data[field] = self.ds.arr(fd, units) - msg = ( - "Field %s was added without specifying units, " - "assuming units are %s" - ) - mylog.warning(msg % (fi.name, units)) - try: - fd.convert_to_units(fi.units) - except AttributeError: - # If the field returns an ndarray, coerce to a - # dimensionless YTArray and verify that field is - # supposed to be unitless - fd = self.ds.arr(fd, "") - if fi.units != "": - raise YTFieldUnitError(fi, fd.units) - except UnitConversionError: - raise YTFieldUnitError(fi, fd.units) - except UnitParseError: - raise YTFieldUnitParseError(fi) - self.field_data[field] = fd - except YTGenerationInProgress as gip: - for f in gip.fields: - if f not in fields_to_generate: - fields_to_generate.append(f) - - def __or__(self, other): - if not isinstance(other, YTSelectionContainer): - raise YTBooleanObjectError(other) - if self.ds is not other.ds: - raise YTBooleanObjectsWrongDataset() - # Should maybe do something with field parameters here - return YTBooleanContainer("OR", self, other, ds=self.ds) - - def __invert__(self): - # ~obj - asel = yt.geometry.selection_routines.AlwaysSelector(self.ds) - return YTBooleanContainer("NOT", self, asel, ds=self.ds) - - def __xor__(self, other): - if not isinstance(other, YTSelectionContainer): - raise YTBooleanObjectError(other) - if self.ds is not other.ds: - raise YTBooleanObjectsWrongDataset() - return YTBooleanContainer("XOR", self, other, ds=self.ds) - - def __and__(self, other): - 
if not isinstance(other, YTSelectionContainer): - raise YTBooleanObjectError(other) - if self.ds is not other.ds: - raise YTBooleanObjectsWrongDataset() - return YTBooleanContainer("AND", self, other, ds=self.ds) - - def __add__(self, other): - return self.__or__(other) - - def __sub__(self, other): - if not isinstance(other, YTSelectionContainer): - raise YTBooleanObjectError(other) - if self.ds is not other.ds: - raise YTBooleanObjectsWrongDataset() - return YTBooleanContainer("NEG", self, other, ds=self.ds) - - @contextmanager - def _field_lock(self): - self._locked = True - yield - self._locked = False - - @contextmanager - def _ds_hold(self, new_ds): - """ - This contextmanager is used to take a data object and preserve its - attributes but allow the dataset that underlies it to be swapped out. - This is typically only used internally, and differences in unit systems - may present interesting possibilities. - """ - old_ds = self.ds - old_index = self._index - self.ds = new_ds - self._index = new_ds.index - old_chunk_info = self._chunk_info - old_chunk = self._current_chunk - old_size = self.size - self._chunk_info = None - self._current_chunk = None - self.size = None - self._index._identify_base_chunk(self) - with self._chunked_read(None): - yield - self._index = old_index - self.ds = old_ds - self._chunk_info = old_chunk_info - self._current_chunk = old_chunk - self.size = old_size - - @contextmanager - def _chunked_read(self, chunk): - # There are several items that need to be swapped out - # field_data, size, shape - obj_field_data = [] - if hasattr(chunk, "objs"): - for obj in chunk.objs: - obj_field_data.append(obj.field_data) - obj.field_data = YTFieldData() - old_field_data, self.field_data = self.field_data, YTFieldData() - old_chunk, self._current_chunk = self._current_chunk, chunk - old_locked, self._locked = self._locked, False - yield - self.field_data = old_field_data - self._current_chunk = old_chunk - self._locked = old_locked - if hasattr(chunk, "objs"): - for obj in chunk.objs: - obj.field_data = obj_field_data.pop(0) - - @contextmanager - def _activate_cache(self): - cache = self._field_cache or {} - old_fields = {} - for field in (f for f in cache if f in self.field_data): - old_fields[field] = self.field_data[field] - self.field_data.update(cache) - yield - for field in cache: - self.field_data.pop(field) - if field in old_fields: - self.field_data[field] = old_fields.pop(field) - self._field_cache = None - - def _initialize_cache(self, cache): - # Wipe out what came before - self._field_cache = {} - self._field_cache.update(cache) - - @property - def icoords(self): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - return self._current_chunk.icoords - - @property - def fcoords(self): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - return self._current_chunk.fcoords - - @property - def ires(self): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - return self._current_chunk.ires - - @property - def fwidth(self): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - return self._current_chunk.fwidth - - @property - def fcoords_vertex(self): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - return self._current_chunk.fcoords_vertex - - @property - def max_level(self): - if self._max_level is None: - try: - return self.ds.max_level - except AttributeError: - return None - return self._max_level - - @max_level.setter - def max_level(self, 
value): - if self._selector is not None: - del self._selector - self._selector = None - self._current_chunk = None - self.size = None - self.shape = None - self.field_data.clear() - self._max_level = value - - @property - def min_level(self): - if self._min_level is None: - try: - return 0 - except AttributeError: - return None - return self._min_level - - @min_level.setter - def min_level(self, value): - if self._selector is not None: - del self._selector - self._selector = None - self.field_data.clear() - self.size = None - self.shape = None - self._current_chunk = None - self._min_level = value - - -class YTSelectionContainer0D(YTSelectionContainer): - _spatial = False - _dimensionality = 0 - - def __init__(self, ds, field_parameters=None, data_source=None): - super(YTSelectionContainer0D, self).__init__(ds, field_parameters, data_source) - - -class YTSelectionContainer1D(YTSelectionContainer): - _spatial = False - _dimensionality = 1 - - def __init__(self, ds, field_parameters=None, data_source=None): - super(YTSelectionContainer1D, self).__init__(ds, field_parameters, data_source) - self._grids = None - self._sortkey = None - self._sorted = {} - - -class YTSelectionContainer2D(YTSelectionContainer): - _key_fields = ["px", "py", "pdx", "pdy"] - _dimensionality = 2 - """ - Prepares the YTSelectionContainer2D, normal to *axis*. If *axis* is 4, we are not - aligned with any axis. - """ - _spatial = False - - def __init__(self, axis, ds, field_parameters=None, data_source=None): - super(YTSelectionContainer2D, self).__init__(ds, field_parameters, data_source) - # We need the ds, which will exist by now, for fix_axis. - self.axis = fix_axis(axis, self.ds) - self.set_field_parameter("axis", axis) - - def _convert_field_name(self, field): - return field - - def _get_pw(self, fields, center, width, origin, plot_type): - from yt.visualization.fixed_resolution import FixedResolutionBuffer as frb - from yt.visualization.plot_window import PWViewerMPL, get_window_parameters - - axis = self.axis - skip = self._key_fields - skip += list(set(frb._exclude_fields).difference(set(self._key_fields))) - self.fields = [k for k in self.field_data if k not in skip] - if fields is not None: - self.fields = ensure_list(fields) + self.fields - if len(self.fields) == 0: - raise ValueError("No fields found to plot in get_pw") - (bounds, center, display_center) = get_window_parameters( - axis, center, width, self.ds - ) - pw = PWViewerMPL( - self, - bounds, - fields=self.fields, - origin=origin, - frb_generator=frb, - plot_type=plot_type, - ) - pw._setup_plots() - return pw - - def to_frb(self, width, resolution, center=None, height=None, periodic=False): - r"""This function returns a FixedResolutionBuffer generated from this - object. - - A FixedResolutionBuffer is an object that accepts a variable-resolution - 2D object and transforms it into an NxM bitmap that can be plotted, - examined or processed. This is a convenience function to return an FRB - directly from an existing 2D data object. - - Parameters - ---------- - width : width specifier - This can either be a floating point value, in the native domain - units of the simulation, or a tuple of the (value, unit) style. - This will be the width of the FRB. - height : height specifier - This will be the physical height of the FRB, by default it is equal - to width. Note that this will not make any corrections to - resolution for the aspect ratio. - resolution : int or tuple of ints - The number of pixels on a side of the final FRB. 
If iterable, this - will be the width then the height. - center : array-like of floats, optional - The center of the FRB. If not specified, defaults to the center of - the current object. - periodic : bool - Should the returned Fixed Resolution Buffer be periodic? (default: - False). - - Returns - ------- - frb : :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` - A fixed resolution buffer, which can be queried for fields. - - Examples - -------- - - >>> proj = ds.proj("Density", 0) - >>> frb = proj.to_frb( (100.0, 'kpc'), 1024) - >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png') - """ - - if (self.ds.geometry == "cylindrical" and self.axis == 1) or ( - self.ds.geometry == "polar" and self.axis == 2 - ): - if center is not None and center != (0.0, 0.0): - raise NotImplementedError( - "Currently we only support images centered at R=0. " - + "We plan to generalize this in the near future" - ) - from yt.visualization.fixed_resolution import ( - CylindricalFixedResolutionBuffer, - ) - - validate_width_tuple(width) - if iterable(resolution): - resolution = max(resolution) - frb = CylindricalFixedResolutionBuffer(self, width, resolution) - return frb - - if center is None: - center = self.center - if center is None: - center = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.0 - elif iterable(center) and not isinstance(center, YTArray): - center = self.ds.arr(center, "code_length") - if iterable(width): - w, u = width - if isinstance(w, tuple) and isinstance(u, tuple): - height = u - w, u = w - width = self.ds.quan(w, units=u) - elif not isinstance(width, YTArray): - width = self.ds.quan(width, "code_length") - if height is None: - height = width - elif iterable(height): - h, u = height - height = self.ds.quan(h, units=u) - elif not isinstance(height, YTArray): - height = self.ds.quan(height, "code_length") - if not iterable(resolution): - resolution = (resolution, resolution) - from yt.visualization.fixed_resolution import FixedResolutionBuffer - - xax = self.ds.coordinates.x_axis[self.axis] - yax = self.ds.coordinates.y_axis[self.axis] - bounds = ( - center[xax] - width * 0.5, - center[xax] + width * 0.5, - center[yax] - height * 0.5, - center[yax] + height * 0.5, - ) - frb = FixedResolutionBuffer(self, bounds, resolution, periodic=periodic) - return frb - - -class YTSelectionContainer3D(YTSelectionContainer): - """ - Returns an instance of YTSelectionContainer3D, or prepares one. Usually only - used as a base class. Note that *center* is supplied, but only used - for fields and quantities that require it. - """ - - _key_fields = ["x", "y", "z", "dx", "dy", "dz"] - _spatial = False - _num_ghost_zones = 0 - _dimensionality = 3 - - def __init__(self, center, ds, field_parameters=None, data_source=None): - super(YTSelectionContainer3D, self).__init__(ds, field_parameters, data_source) - self._set_center(center) - self.coords = None - self._grids = None - - def cut_region(self, field_cuts, field_parameters=None, locals=None): - """ - Return a YTCutRegion, where the a cell is identified as being inside - the cut region based on the value of one or more fields. Note that in - previous versions of yt the name 'grid' was used to represent the data - object used to construct the field cut, as of yt 3.0, this has been - changed to 'obj'. - - Parameters - ---------- - field_cuts : list of strings - A list of conditionals that will be evaluated. 
In the namespace - available, these conditionals will have access to 'obj' which is a - data object of unknown shape, and they must generate a boolean array. - For instance, conditionals = ["obj['temperature'] < 1e3"] - field_parameters : dictionary - A dictionary of field parameters to be used when applying the field - cuts. - locals : dictionary - A dictionary of local variables to use when defining the cut region. - - Examples - -------- - To find the total mass of hot gas with temperature greater than 10^6 K - in your volume: - - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.cut_region(["obj['temperature'] > 1e6"]) - >>> print(cr.quantities.total_quantity("cell_mass").in_units('Msun')) - """ - if locals is None: - locals = {} - cr = self.ds.cut_region( - self, field_cuts, field_parameters=field_parameters, locals=locals - ) - return cr - - def exclude_above(self, field, value, units=None): - """ - This function will return a YTCutRegion where all of the regions - whose field is above a given value are masked. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - value : float - The minimum value that will not be masked in the output - YTCutRegion. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field above the given value masked. - - Example - ------- - - To find the total mass of hot gas with temperature colder than 10^6 K - in your volume: - - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.exclude_above('temperature', 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - - """ - if units is None: - field_cuts = 'obj["' + field + '"] <= ' + str(value) - else: - field_cuts = ( - 'obj["' + field + '"].in_units("' + units + '") <= ' + str(value) - ) - cr = self.cut_region(field_cuts) - return cr - - def include_above(self, field, value, units=None): - """ - This function will return a YTCutRegion where only the regions - whose field is above a given value are included. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - value : float - The minimum value that will not be masked in the output - YTCutRegion. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field above the given value masked. - - Example - ------- - - To find the total mass of hot gas with temperature warmer than 10^6 K - in your volume: - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.include_above('temperature', 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - - if units is None: - field_cuts = 'obj["' + field + '"] > ' + str(value) - else: - field_cuts = ( - 'obj["' + field + '"].in_units("' + units + '") > ' + str(value) - ) - cr = self.cut_region(field_cuts) - return cr - - def exclude_equal(self, field, value, units=None): - """ - This function will return a YTCutRegion where all of the regions - whose field are equal to given value are masked. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - value : float - The minimum value that will not be masked in the output - YTCutRegion. 
- units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field equal to the given value masked. - - Example - ------- - - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.exclude_equal('temperature', 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - if units is None: - field_cuts = 'obj["' + field + '"] != ' + str(value) - else: - field_cuts = ( - 'obj["' + field + '"].in_units("' + units + '") != ' + str(value) - ) - cr = self.cut_region(field_cuts) - return cr - - def include_equal(self, field, value, units=None): - """ - This function will return a YTCutRegion where only the regions - whose field are equal to given value are included. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - value : float - The minimum value that will not be masked in the output - YTCutRegion. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field equal to the given value included. - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.include_equal('temperature', 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - if units is None: - field_cuts = 'obj["' + field + '"] == ' + str(value) - else: - field_cuts = ( - 'obj["' + field + '"].in_units("' + units + '") == ' + str(value) - ) - cr = self.cut_region(field_cuts) - return cr - - def exclude_inside(self, field, min_value, max_value, units=None): - """ - This function will return a YTCutRegion where all of the regions - whose field are inside the interval from min_value to max_value. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - min_value : float - The minimum value inside the interval to be excluded. - max_value : float - The maximum value inside the interval to be excluded. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field inside the given interval excluded. - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.exclude_inside('temperature', 1e5, 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - if units is None: - field_cuts = ( - '(obj["' - + field - + '"] <= ' - + str(min_value) - + ') | (obj["' - + field - + '"] >= ' - + str(max_value) - + ")" - ) - else: - field_cuts = ( - '(obj["' - + field - + '"].in_units("' - + units - + '") <= ' - + str(min_value) - + ') | (obj["' - + field - + '"].in_units("' - + units - + '") >= ' - + str(max_value) - + ")" - ) - cr = self.cut_region(field_cuts) - return cr - - def include_inside(self, field, min_value, max_value, units=None): - """ - This function will return a YTCutRegion where only the regions - whose field are inside the interval from min_value to max_value are - included. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - min_value : float - The minimum value inside the interval to be excluded. - max_value : float - The maximum value inside the interval to be excluded. 
- units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field inside the given interval excluded. - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.include_inside('temperature', 1e5, 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - if units is None: - field_cuts = ( - '(obj["' - + field - + '"] > ' - + str(min_value) - + ') & (obj["' - + field - + '"] < ' - + str(max_value) - + ")" - ) - else: - field_cuts = ( - '(obj["' - + field - + '"].in_units("' - + units - + '") > ' - + str(min_value) - + ') & (obj["' - + field - + '"].in_units("' - + units - + '") < ' - + str(max_value) - + ")" - ) - cr = self.cut_region(field_cuts) - return cr - - def exclude_outside(self, field, min_value, max_value, units=None): - """ - This function will return a YTCutRegion where all of the regions - whose field are outside the interval from min_value to max_value. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - min_value : float - The minimum value inside the interval to be excluded. - max_value : float - The maximum value inside the interval to be excluded. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field outside the given interval excluded. - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.exclude_outside('temperature', 1e5, 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - cr = self.exclude_below(field, min_value, units) - cr = cr.exclude_above(field, max_value, units) - return cr - - def include_outside(self, field, min_value, max_value, units=None): - """ - This function will return a YTCutRegion where only the regions - whose field are outside the interval from min_value to max_value are - included. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - min_value : float - The minimum value inside the interval to be excluded. - max_value : float - The maximum value inside the interval to be excluded. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field outside the given interval excluded. - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.exclude_outside('temperature', 1e5, 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - cr = self.exclude_inside(field, min_value, max_value, units) - return cr - - def exclude_below(self, field, value, units=None): - """ - This function will return a YTCutRegion where all of the regions - whose field is below a given value are masked. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - value : float - The minimum value that will not be masked in the output - YTCutRegion. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the field below the given value masked. 
- - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.exclude_below('temperature', 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - if units is None: - field_cuts = 'obj["' + field + '"] >= ' + str(value) - else: - field_cuts = ( - 'obj["' + field + '"].in_units("' + units + '") >= ' + str(value) - ) - cr = self.cut_region(field_cuts) - return cr - - def exclude_nan(self, field, units=None): - """ - This function will return a YTCutRegion where all of the regions - whose field is NaN are masked. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - value : float - The minimum value that will not be masked in the output - YTCutRegion. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with the NaN entries of the field masked. - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.exclude_nan('temperature') - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - if units is None: - field_cuts = '~np.isnan(obj["' + field + '"])' - else: - field_cuts = '~np.isnan(obj["' + field + '"].in_units("' + units + '"))' - cr = self.cut_region(field_cuts, locals={"np": np}) - return cr - - def include_below(self, field, value, units=None): - """ - This function will return a YTCutRegion where only the regions - whose field is below a given value are included. - - Parameters - ---------- - field : string - The field in which the conditional will be applied. - value : float - The minimum value that will not be masked in the output - YTCutRegion. - units : string or None - The units of the value threshold. None will use the default units - given in the field. - - Returns - ------- - cut_region : YTCutRegion - The YTCutRegion with only regions with the field below the given - value included. - - Example - ------- - >>> ds = yt.load("RedshiftOutput0005") - >>> ad = ds.all_data() - >>> cr = ad.include_below('temperature', 1e5, 1e6) - >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') - """ - if units is None: - field_cuts = 'obj["' + field + '"] < ' + str(value) - else: - field_cuts = ( - 'obj["' + field + '"].in_units("' + units + '") < ' + str(value) - ) - cr = self.cut_region(field_cuts) - return cr - - def extract_isocontours( - self, field, value, filename=None, rescale=False, sample_values=None - ): - r"""This identifies isocontours on a cell-by-cell basis, with no - consideration of global connectedness, and returns the vertices of the - Triangles in that isocontour. - - This function simply returns the vertices of all the triangles - calculated by the `marching cubes - `_ algorithm; for more - complex operations, such as identifying connected sets of cells above a - given threshold, see the extract_connected_sets function. This is more - useful for calculating, for instance, total isocontour area, or - visualizing in an external program (such as `MeshLab - `_.) - - Parameters - ---------- - field : string - Any field that can be obtained in a data object. This is the field - which will be isocontoured. - value : float - The value at which the isocontour should be calculated. - filename : string, optional - If supplied, this file will be filled with the vertices in .obj - format. Suitable for loading into meshlab. 
- rescale : bool, optional - If true, the vertices will be rescaled within their min/max. - sample_values : string, optional - Any field whose value should be extracted at the center of each - triangle. - - Returns - ------- - verts : array of floats - The array of vertices, x,y,z. Taken in threes, these are the - triangle vertices. - samples : array of floats - If `sample_values` is specified, this will be returned and will - contain the values of the field specified at the center of each - triangle. - - Examples - -------- - This will create a data object, find a nice value in the center, and - output the vertices to "triangles.obj" after rescaling them. - - >>> dd = ds.all_data() - >>> rho = dd.quantities["WeightedAverageQuantity"]( - ... "Density", weight="CellMassMsun") - >>> verts = dd.extract_isocontours("Density", rho, - ... "triangles.obj", True) - """ - from yt.data_objects.static_output import ParticleDataset - from yt.frontends.stream.data_structures import StreamParticlesDataset - - verts = [] - samples = [] - if isinstance(self.ds, (ParticleDataset, StreamParticlesDataset)): - raise NotImplementedError - for block, mask in self.blocks: - my_verts = self._extract_isocontours_from_grid( - block, mask, field, value, sample_values - ) - if sample_values is not None: - my_verts, svals = my_verts - samples.append(svals) - verts.append(my_verts) - verts = np.concatenate(verts).transpose() - verts = self.comm.par_combine_object(verts, op="cat", datatype="array") - verts = verts.transpose() - if sample_values is not None: - samples = np.concatenate(samples) - samples = self.comm.par_combine_object(samples, op="cat", datatype="array") - if rescale: - mi = np.min(verts, axis=0) - ma = np.max(verts, axis=0) - verts = (verts - mi) / (ma - mi).max() - if filename is not None and self.comm.rank == 0: - if hasattr(filename, "write"): - f = filename - else: - f = open(filename, "w") - for v1 in verts: - f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2])) - for i in range(len(verts) // 3): - f.write("f %s %s %s\n" % (i * 3 + 1, i * 3 + 2, i * 3 + 3)) - if not hasattr(filename, "write"): - f.close() - if sample_values is not None: - return verts, samples - return verts - - def _extract_isocontours_from_grid( - self, grid, mask, field, value, sample_values=None - ): - vc_fields = [field] - if sample_values is not None: - vc_fields.append(sample_values) - - vc_data = grid.get_vertex_centered_data(vc_fields, no_ghost=False) - try: - svals = vc_data[sample_values] - except KeyError: - svals = None - - my_verts = march_cubes_grid( - value, vc_data[field], mask, grid.LeftEdge, grid.dds, svals - ) - return my_verts - - def calculate_isocontour_flux( - self, field, value, field_x, field_y, field_z, fluxing_field=None - ): - r"""This identifies isocontours on a cell-by-cell basis, with no - consideration of global connectedness, and calculates the flux over - those contours. - - This function will conduct `marching cubes - `_ on all the cells in a - given data container (grid-by-grid), and then for each identified - triangular segment of an isocontour in a given cell, calculate the - gradient (i.e., normal) in the isocontoured field, interpolate the local - value of the "fluxing" field, the area of the triangle, and then return: - - area * local_flux_value * (n dot v) - - Where area, local_value, and the vector v are interpolated at the barycenter - (weighted by the vertex values) of the triangle. 
Note that this - specifically allows for the field fluxing across the surface to be - *different* from the field being contoured. If the fluxing_field is - not specified, it is assumed to be 1.0 everywhere, and the raw flux - with no local-weighting is returned. - - Additionally, the returned flux is defined as flux *into* the surface, - not flux *out of* the surface. - - Parameters - ---------- - field : string - Any field that can be obtained in a data object. This is the field - which will be isocontoured and used as the "local_value" in the - flux equation. - value : float - The value at which the isocontour should be calculated. - field_x : string - The x-component field - field_y : string - The y-component field - field_z : string - The z-component field - fluxing_field : string, optional - The field whose passage over the surface is of interest. If not - specified, assumed to be 1.0 everywhere. - - Returns - ------- - flux : float - The summed flux. Note that it is not currently scaled; this is - simply the code-unit area times the fields. - - Examples - -------- - This will create a data object, find a nice value in the center, and - calculate the metal flux over it. - - >>> dd = ds.all_data() - >>> rho = dd.quantities["WeightedAverageQuantity"]( - ... "Density", weight="CellMassMsun") - >>> flux = dd.calculate_isocontour_flux("Density", rho, - ... "velocity_x", "velocity_y", "velocity_z", "Metal_Density") - """ - flux = 0.0 - for block, mask in self.blocks: - flux += self._calculate_flux_in_grid( - block, mask, field, value, field_x, field_y, field_z, fluxing_field - ) - flux = self.comm.mpi_allreduce(flux, op="sum") - return flux - - def _calculate_flux_in_grid( - self, grid, mask, field, value, field_x, field_y, field_z, fluxing_field=None - ): - - vc_fields = [field, field_x, field_y, field_z] - if fluxing_field is not None: - vc_fields.append(fluxing_field) - - vc_data = grid.get_vertex_centered_data(vc_fields) - - if fluxing_field is None: - ff = np.ones_like(vc_data[field], dtype="float64") - else: - ff = vc_data[fluxing_field] - - return march_cubes_grid_flux( - value, - vc_data[field], - vc_data[field_x], - vc_data[field_y], - vc_data[field_z], - ff, - mask, - grid.LeftEdge, - grid.dds, - ) - - def extract_connected_sets( - self, field, num_levels, min_val, max_val, log_space=True, cumulative=True - ): - """ - This function will create a set of contour objects, defined - by having connected cell structures, which can then be - studied and used to 'paint' their source grids, thus enabling - them to be plotted. - - Note that this function *can* return a connected set object that has no - member values. - """ - if log_space: - cons = np.logspace(np.log10(min_val), np.log10(max_val), num_levels + 1) - else: - cons = np.linspace(min_val, max_val, num_levels + 1) - contours = {} - for level in range(num_levels): - contours[level] = {} - if cumulative: - mv = max_val - else: - mv = cons[level + 1] - from yt.data_objects.level_sets.api import identify_contours - from yt.data_objects.level_sets.clump_handling import add_contour_field - - nj, cids = identify_contours(self, field, cons[level], mv) - unique_contours = set([]) - for sl_list in cids.values(): - for _sl, ff in sl_list: - unique_contours.update(np.unique(ff)) - contour_key = uuid.uuid4().hex - # In case we're a cut region already... 
- base_object = getattr(self, "base_object", self) - add_contour_field(base_object.ds, contour_key) - for cid in sorted(unique_contours): - if cid == -1: - continue - contours[level][cid] = base_object.cut_region( - ["obj['contours_%s'] == %s" % (contour_key, cid)], - {"contour_slices_%s" % contour_key: cids}, - ) - return cons, contours - - def _get_bbox(self): - """ - Return the bounding box for this data container. - This generic version will return the bounds of the entire domain. - """ - return self.ds.domain_left_edge, self.ds.domain_right_edge - - def get_bbox(self): - """ - Return the bounding box for this data container. - """ - if self.ds.geometry != "cartesian": - raise NotImplementedError( - "get_bbox is currently only implemented " "for cartesian geometries!" - ) - le, re = self._get_bbox() - le.convert_to_units("code_length") - re.convert_to_units("code_length") - return le, re - - def volume(self): - """ - Return the volume of the data container. - This is found by adding up the volume of the cells with centers - in the container, rather than using the geometric shape of - the container, so this may vary very slightly - from what might be expected from the geometric volume. - """ - return self.quantities.total_quantity(("index", "cell_volume")) - - -class YTBooleanContainer(YTSelectionContainer3D): - """ - This is a boolean operation, accepting AND, OR, XOR, and NOT for combining - multiple data objects. - - This object is not designed to be created directly; it is designed to be - created implicitly by using one of the bitwise operations (&, \|, ^, \~) on - one or two other data objects. These correspond to the appropriate boolean - operations, and the resultant object can be nested. - - Parameters - ---------- - op : string - Can be AND, OR, XOR, NOT or NEG. - dobj1 : YTSelectionContainer - The first selection object - dobj2 : YTSelectionContainer - The second object - - Examples - -------- - - >>> import yt - >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - >>> sp = ds.sphere("c", 0.1) - >>> dd = ds.r[:,:,:] - >>> new_obj = sp ^ dd - >>> print(new_obj.sum("cell_volume"), dd.sum("cell_volume") - - ... sp.sum("cell_volume")) - """ - - _type_name = "bool" - _con_args = ("op", "dobj1", "dobj2") - - def __init__( - self, op, dobj1, dobj2, ds=None, field_parameters=None, data_source=None - ): - YTSelectionContainer3D.__init__(self, None, ds, field_parameters, data_source) - self.op = op.upper() - self.dobj1 = dobj1 - self.dobj2 = dobj2 - name = "Boolean%sSelector" % (self.op,) - sel_cls = getattr(yt.geometry.selection_routines, name) - self._selector = sel_cls(self) - - def _get_bbox(self): - le1, re1 = self.dobj1._get_bbox() - if self.op == "NOT": - return le1, re1 - else: - le2, re2 = self.dobj2._get_bbox() - return np.minimum(le1, le2), np.maximum(re1, re2) - - # Many of these items are set up specifically to ensure that # we are not breaking old pickle files. 
This means we must only call the # _reconstruct_object and that we cannot mandate any additional arguments to diff --git a/yt/data_objects/grid_patch.py b/yt/data_objects/grid_patch.py index a80d461f71d..e5157923372 100644 --- a/yt/data_objects/grid_patch.py +++ b/yt/data_objects/grid_patch.py @@ -5,7 +5,7 @@ import yt.geometry.particle_deposit as particle_deposit from yt.config import ytcfg -from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from yt.funcs import iterable from yt.geometry.selection_routines import convert_mask_to_indices from yt.units.yt_array import YTArray diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index a72772753e9..99211e85d4e 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -4,7 +4,7 @@ import yt.geometry.particle_deposit as particle_deposit import yt.geometry.particle_smooth as particle_smooth -from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from yt.funcs import mylog from yt.geometry.particle_oct_container import ParticleOctreeContainer from yt.units.dimensions import length diff --git a/yt/data_objects/particle_container.py b/yt/data_objects/particle_container.py index f59ef908cd8..4cd1b7cba33 100644 --- a/yt/data_objects/particle_container.py +++ b/yt/data_objects/particle_container.py @@ -1,6 +1,7 @@ import contextlib -from yt.data_objects.data_containers import YTFieldData, YTSelectionContainer +from yt.data_objects.data_containers import YTFieldData +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from yt.funcs import ensure_list from yt.utilities.exceptions import ( YTDataSelectorNotImplemented, diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py deleted file mode 100644 index 377bc27888a..00000000000 --- a/yt/data_objects/selection_data_containers.py +++ /dev/null @@ -1,1332 +0,0 @@ -import numpy as np - -from yt.data_objects.data_containers import ( - YTSelectionContainer, - YTSelectionContainer0D, - YTSelectionContainer1D, - YTSelectionContainer2D, - YTSelectionContainer3D, -) -from yt.data_objects.static_output import Dataset -from yt.frontends.sph.data_structures import SPHDataset -from yt.funcs import ( - ensure_list, - fix_axis, - fix_length, - iterable, - mylog, - validate_3d_array, - validate_axis, - validate_center, - validate_float, - validate_iterable, - validate_object, - validate_width_tuple, -) -from yt.geometry.selection_routines import points_in_cells -from yt.units.yt_array import YTArray, YTQuantity, udot, unorm -from yt.utilities.exceptions import ( - YTEllipsoidOrdering, - YTException, - YTIllDefinedCutRegion, - YTSphereTooSmall, -) -from yt.utilities.lib.pixelization_routines import SPHKernelInterpolationTable -from yt.utilities.math_utils import get_rotation_matrix -from yt.utilities.minimal_representation import MinimalSliceData -from yt.utilities.on_demand_imports import _miniball, _scipy -from yt.utilities.orientation import Orientation - - -class YTPoint(YTSelectionContainer0D): - """ - A 0-dimensional object defined by a single point - - Parameters - ---------- - p: array_like - A points defined within the domain. 
If the domain is - periodic its position will be corrected to lie inside - the range [DLE,DRE) to ensure one and only one cell may - match that point - ds: ~yt.data_objects.static_output.Dataset, optional - An optional dataset to use rather than self.ds - field_parameters : dictionary - A dictionary of field parameters than can be accessed by derived - fields. - data_source: optional - Draw the selection from the provided data source rather than - all data associated with the data_set - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> c = [0.5,0.5,0.5] - >>> point = ds.point(c) - """ - - _type_name = "point" - _con_args = ("p",) - - def __init__(self, p, ds=None, field_parameters=None, data_source=None): - validate_3d_array(p) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - super(YTPoint, self).__init__(ds, field_parameters, data_source) - if isinstance(p, YTArray): - # we pass p through ds.arr to ensure code units are attached - self.p = self.ds.arr(p) - else: - self.p = self.ds.arr(p, "code_length") - - -class YTOrthoRay(YTSelectionContainer1D): - """ - This is an orthogonal ray cast through the entire domain, at a specific - coordinate. - - This object is typically accessed through the `ortho_ray` object that - hangs off of index objects. The resulting arrays have their - dimensionality reduced to one, and an ordered list of points at an - (x,y) tuple along `axis` are available. - - Parameters - ---------- - axis : int or char - The axis along which to slice. Can be 0, 1, or 2 for x, y, z. - coords : tuple of floats - The (plane_x, plane_y) coordinates at which to cast the ray. Note - that this is in the plane coordinates: so if you are casting along - x, this will be (y, z). If you are casting along y, this will be - (z, x). If you are casting along z, this will be (x, y). - ds: ~yt.data_objects.static_output.Dataset, optional - An optional dataset to use rather than self.ds - field_parameters : dictionary - A dictionary of field parameters than can be accessed by derived - fields. - data_source: optional - Draw the selection from the provided data source rather than - all data associated with the data_set - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> oray = ds.ortho_ray(0, (0.2, 0.74)) - >>> print(oray["Density"]) - - Note: The low-level data representation for rays are not guaranteed to be - spatially ordered. In particular, with AMR datasets, higher resolution - data is tagged on to the end of the ray. If you want this data - represented in a spatially ordered manner, manually sort it by the "t" - field, which is the value of the parametric variable that goes from 0 at - the start of the ray to 1 at the end: - - >>> my_ray = ds.ortho_ray(...) 
- >>> ray_sort = np.argsort(my_ray["t"]) - >>> density = my_ray["density"][ray_sort] - """ - - _key_fields = ["x", "y", "z", "dx", "dy", "dz"] - _type_name = "ortho_ray" - _con_args = ("axis", "coords") - - def __init__(self, axis, coords, ds=None, field_parameters=None, data_source=None): - validate_axis(ds, axis) - validate_iterable(coords) - for c in coords: - validate_float(c) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - super(YTOrthoRay, self).__init__(ds, field_parameters, data_source) - self.axis = fix_axis(axis, self.ds) - xax = self.ds.coordinates.x_axis[self.axis] - yax = self.ds.coordinates.y_axis[self.axis] - self.px_ax = xax - self.py_ax = yax - # Even though we may not be using x,y,z we use them here. - self.px_dx = "d%s" % ("xyz"[self.px_ax]) - self.py_dx = "d%s" % ("xyz"[self.py_ax]) - # Convert coordinates to code length. - if isinstance(coords[0], YTQuantity): - self.px = self.ds.quan(coords[0]).to("code_length") - else: - self.px = self.ds.quan(coords[0], "code_length") - if isinstance(coords[1], YTQuantity): - self.py = self.ds.quan(coords[1]).to("code_length") - else: - self.py = self.ds.quan(coords[1], "code_length") - self.sort_by = "xyz"[self.axis] - - @property - def coords(self): - return (self.px, self.py) - - -class YTRay(YTSelectionContainer1D): - """ - This is an arbitrarily-aligned ray cast through the entire domain, at a - specific coordinate. - - This object is typically accessed through the `ray` object that hangs - off of index objects. The resulting arrays have their - dimensionality reduced to one, and an ordered list of points at an - (x,y) tuple along `axis` are available, as is the `t` field, which - corresponds to a unitless measurement along the ray from start to - end. - - Parameters - ---------- - start_point : array-like set of 3 floats - The place where the ray starts. - end_point : array-like set of 3 floats - The place where the ray ends. - ds: ~yt.data_objects.static_output.Dataset, optional - An optional dataset to use rather than self.ds - field_parameters : dictionary - A dictionary of field parameters than can be accessed by derived - fields. - data_source: optional - Draw the selection from the provided data source rather than - all data associated with the data_set - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> ray = ds.ray((0.2, 0.74, 0.11), (0.4, 0.91, 0.31)) - >>> print(ray["Density"], ray["t"], ray["dts"]) - - Note: The low-level data representation for rays are not guaranteed to be - spatially ordered. In particular, with AMR datasets, higher resolution - data is tagged on to the end of the ray. If you want this data - represented in a spatially ordered manner, manually sort it by the "t" - field, which is the value of the parametric variable that goes from 0 at - the start of the ray to 1 at the end: - - >>> my_ray = ds.ray(...) 
- >>> ray_sort = np.argsort(my_ray["t"]) - >>> density = my_ray["density"][ray_sort] - """ - - _type_name = "ray" - _con_args = ("start_point", "end_point") - _container_fields = ("t", "dts") - - def __init__( - self, start_point, end_point, ds=None, field_parameters=None, data_source=None - ): - validate_3d_array(start_point) - validate_3d_array(end_point) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - super(YTRay, self).__init__(ds, field_parameters, data_source) - if isinstance(start_point, YTArray): - self.start_point = self.ds.arr(start_point).to("code_length") - else: - self.start_point = self.ds.arr(start_point, "code_length", dtype="float64") - if isinstance(end_point, YTArray): - self.end_point = self.ds.arr(end_point).to("code_length") - else: - self.end_point = self.ds.arr(end_point, "code_length", dtype="float64") - if (self.start_point < self.ds.domain_left_edge).any() or ( - self.end_point > self.ds.domain_right_edge - ).any(): - mylog.warn( - "Ray start or end is outside the domain. " - + "Returned data will only be for the ray section inside the domain." - ) - self.vec = self.end_point - self.start_point - self._set_center(self.start_point) - self.set_field_parameter("center", self.start_point) - self._dts, self._ts = None, None - - def _generate_container_field(self, field): - # What should we do with `ParticleDataset`? - if isinstance(self.ds, SPHDataset): - return self._generate_container_field_sph(field) - else: - return self._generate_container_field_grid(field) - - def _generate_container_field_grid(self, field): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - if field == "dts": - return self._current_chunk.dtcoords - elif field == "t": - return self._current_chunk.tcoords - else: - raise KeyError(field) - - def _generate_container_field_sph(self, field): - if field not in ["dts", "t"]: - raise KeyError(field) - - length = unorm(self.vec) - pos = self[self.ds._sph_ptypes[0], "particle_position"] - r = pos - self.start_point - l = udot(r, self.vec / length) - - if field == "t": - return l / length - - hsml = self[self.ds._sph_ptypes[0], "smoothing_length"] - mass = self[self.ds._sph_ptypes[0], "particle_mass"] - dens = self[self.ds._sph_ptypes[0], "density"] - # impact parameter from particle to ray - b = np.sqrt(np.sum(r ** 2, axis=1) - l ** 2) - - # Use an interpolation table to evaluate the integrated 2D - # kernel from the dimensionless impact parameter b/hsml. - itab = SPHKernelInterpolationTable(self.ds.kernel_name) - dl = itab.interpolate_array(b / hsml) * mass / dens / hsml ** 2 - return dl / length - - -class YTSlice(YTSelectionContainer2D): - """ - This is a data object corresponding to a slice through the simulation - domain. - - This object is typically accessed through the `slice` object that hangs - off of index objects. Slice is an orthogonal slice through the - data, taking all the points at the finest resolution available and then - indexing them. It is more appropriately thought of as a slice - 'operator' than an object, however, as its field and coordinate can - both change. - - Parameters - ---------- - axis : int or char - The axis along which to slice. Can be 0, 1, or 2 for x, y, z. - coord : float - The coordinate along the axis at which to slice. This is in - "domain" coordinates. - center : array_like, optional - The 'center' supplied to fields that use it. Note that this does - not have to have `coord` as one value. optional. 
- ds: ~yt.data_objects.static_output.Dataset, optional - An optional dataset to use rather than self.ds - field_parameters : dictionary - A dictionary of field parameters than can be accessed by derived - fields. - data_source: optional - Draw the selection from the provided data source rather than - all data associated with the data_set - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> slice = ds.slice(0, 0.25) - >>> print(slice["Density"]) - """ - - _top_node = "/Slices" - _type_name = "slice" - _con_args = ("axis", "coord") - _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz") - - def __init__( - self, axis, coord, center=None, ds=None, field_parameters=None, data_source=None - ): - validate_axis(ds, axis) - validate_float(coord) - # center is an optional parameter - if center is not None: - validate_center(center) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - YTSelectionContainer2D.__init__(self, axis, ds, field_parameters, data_source) - self._set_center(center) - self.coord = coord - - def _generate_container_field(self, field): - xax = self.ds.coordinates.x_axis[self.axis] - yax = self.ds.coordinates.y_axis[self.axis] - if self._current_chunk is None: - self.index._identify_base_chunk(self) - if field == "px": - return self._current_chunk.fcoords[:, xax] - elif field == "py": - return self._current_chunk.fcoords[:, yax] - elif field == "pz": - return self._current_chunk.fcoords[:, self.axis] - elif field == "pdx": - return self._current_chunk.fwidth[:, xax] * 0.5 - elif field == "pdy": - return self._current_chunk.fwidth[:, yax] * 0.5 - elif field == "pdz": - return self._current_chunk.fwidth[:, self.axis] * 0.5 - else: - raise KeyError(field) - - @property - def _mrep(self): - return MinimalSliceData(self) - - def to_pw(self, fields=None, center="c", width=None, origin="center-window"): - r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this - object. - - This is a bare-bones mechanism of creating a plot window from this - object, which can then be moved around, zoomed, and on and on. All - behavior of the plot window is relegated to that routine. - """ - pw = self._get_pw(fields, center, width, origin, "Slice") - return pw - - def plot(self, fields=None): - if hasattr(self._data_source, "left_edge") and hasattr( - self._data_source, "right_edge" - ): - left_edge = self._data_source.left_edge - right_edge = self._data_source.right_edge - center = (left_edge + right_edge) / 2.0 - width = right_edge - left_edge - xax = self.ds.coordinates.x_axis[self.axis] - yax = self.ds.coordinates.y_axis[self.axis] - lx, rx = left_edge[xax], right_edge[xax] - ly, ry = left_edge[yax], right_edge[yax] - width = (rx - lx), (ry - ly) - else: - width = self.ds.domain_width - center = self.ds.domain_center - pw = self._get_pw(fields, center, width, "native", "Slice") - pw.show() - return pw - - -class YTCuttingPlane(YTSelectionContainer2D): - """ - This is a data object corresponding to an oblique slice through the - simulation domain. - - This object is typically accessed through the `cutting` object - that hangs off of index objects. A cutting plane is an oblique - plane through the data, defined by a normal vector and a coordinate. - It attempts to guess an 'north' vector, which can be overridden, and - then it pixelizes the appropriate data onto the plane without - interpolation. 
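# --- Editor's sketch (illustrative only, not part of the original patch) ---
# The cutting plane keeps the plane equation a*x + b*y + c*z + d = 0, with
# (a, b, c) the unit normal and d = -normal . center, and reports each cell
# center in plane coordinates (px, py, pz) through the orthonormal basis
# (x_vec, y_vec, normal).  Below is a minimal NumPy version of that
# projection; the in-plane basis here is simplified (yt builds x_vec/y_vec
# from an Orientation object with an optional north_vector).
import numpy as np

normal = np.array([0.1, 0.2, -0.9]); normal /= np.linalg.norm(normal)
center = np.array([0.5, 0.42, 0.6])
d = -np.dot(normal, center)                      # plane offset
x_vec = np.cross(normal, [0.0, 0.0, 1.0]); x_vec /= np.linalg.norm(x_vec)
y_vec = np.cross(normal, x_vec)                  # completes the in-plane basis
p = np.array([0.55, 0.40, 0.65])                 # an arbitrary cell center
px = np.dot(p - center, x_vec)                   # in-plane x coordinate
py = np.dot(p - center, y_vec)                   # in-plane y coordinate
pz = np.dot(p - center, normal)                  # signed height above the plane
# --- end of editor's sketch ---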
- - Parameters - ---------- - normal : array_like - The vector that defines the desired plane. For instance, the - angular momentum of a sphere. - center : array_like - The center of the cutting plane, where the normal vector is anchored. - north_vector: array_like, optional - An optional vector to describe the north-facing direction in the resulting - plane. - ds: ~yt.data_objects.static_output.Dataset, optional - An optional dataset to use rather than self.ds - field_parameters : dictionary - A dictionary of field parameters than can be accessed by derived - fields. - data_source: optional - Draw the selection from the provided data source rather than - all data associated with the dataset - - Notes - ----- - - This data object in particular can be somewhat expensive to create. - It's also important to note that unlike the other 2D data objects, this - object provides px, py, pz, as some cells may have a height from the - plane. - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> cp = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6]) - >>> print(cp["Density"]) - """ - - _plane = None - _top_node = "/CuttingPlanes" - _key_fields = YTSelectionContainer2D._key_fields + ["pz", "pdz"] - _type_name = "cutting" - _con_args = ("normal", "center") - _tds_attrs = ("_inv_mat",) - _tds_fields = ("x", "y", "z", "dx") - _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz") - - def __init__( - self, - normal, - center, - north_vector=None, - ds=None, - field_parameters=None, - data_source=None, - ): - validate_3d_array(normal) - validate_center(center) - if north_vector is not None: - validate_3d_array(north_vector) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - YTSelectionContainer2D.__init__(self, 4, ds, field_parameters, data_source) - self._set_center(center) - self.set_field_parameter("center", center) - # Let's set up our plane equation - # ax + by + cz + d = 0 - self.orienter = Orientation(normal, north_vector=north_vector) - self._norm_vec = self.orienter.normal_vector - self._d = -1.0 * np.dot(self._norm_vec, self.center) - self._x_vec = self.orienter.unit_vectors[0] - self._y_vec = self.orienter.unit_vectors[1] - # First we try all three, see which has the best result: - self._rot_mat = np.array([self._x_vec, self._y_vec, self._norm_vec]) - self._inv_mat = np.linalg.pinv(self._rot_mat) - self.set_field_parameter("cp_x_vec", self._x_vec) - self.set_field_parameter("cp_y_vec", self._y_vec) - self.set_field_parameter("cp_z_vec", self._norm_vec) - - @property - def normal(self): - return self._norm_vec - - def _generate_container_field(self, field): - if self._current_chunk is None: - self.index._identify_base_chunk(self) - if field == "px": - x = self._current_chunk.fcoords[:, 0] - self.center[0] - y = self._current_chunk.fcoords[:, 1] - self.center[1] - z = self._current_chunk.fcoords[:, 2] - self.center[2] - tr = np.zeros(x.size, dtype="float64") - tr = self.ds.arr(tr, "code_length") - tr += x * self._x_vec[0] - tr += y * self._x_vec[1] - tr += z * self._x_vec[2] - return tr - elif field == "py": - x = self._current_chunk.fcoords[:, 0] - self.center[0] - y = self._current_chunk.fcoords[:, 1] - self.center[1] - z = self._current_chunk.fcoords[:, 2] - self.center[2] - tr = np.zeros(x.size, dtype="float64") - tr = self.ds.arr(tr, "code_length") - tr += x * self._y_vec[0] - tr += y * self._y_vec[1] - tr += z * self._y_vec[2] - return tr - elif field == "pz": - x = 
self._current_chunk.fcoords[:, 0] - self.center[0] - y = self._current_chunk.fcoords[:, 1] - self.center[1] - z = self._current_chunk.fcoords[:, 2] - self.center[2] - tr = np.zeros(x.size, dtype="float64") - tr = self.ds.arr(tr, "code_length") - tr += x * self._norm_vec[0] - tr += y * self._norm_vec[1] - tr += z * self._norm_vec[2] - return tr - elif field == "pdx": - return self._current_chunk.fwidth[:, 0] * 0.5 - elif field == "pdy": - return self._current_chunk.fwidth[:, 1] * 0.5 - elif field == "pdz": - return self._current_chunk.fwidth[:, 2] * 0.5 - else: - raise KeyError(field) - - def to_pw(self, fields=None, center="c", width=None, axes_unit=None): - r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this - object. - - This is a bare-bones mechanism of creating a plot window from this - object, which can then be moved around, zoomed, and on and on. All - behavior of the plot window is relegated to that routine. - """ - normal = self.normal - center = self.center - self.fields = ensure_list(fields) + [ - k for k in self.field_data.keys() if k not in self._key_fields - ] - from yt.visualization.fixed_resolution import FixedResolutionBuffer - from yt.visualization.plot_window import ( - PWViewerMPL, - get_oblique_window_parameters, - ) - - (bounds, center_rot) = get_oblique_window_parameters( - normal, center, width, self.ds - ) - pw = PWViewerMPL( - self, - bounds, - fields=self.fields, - origin="center-window", - periodic=False, - oblique=True, - frb_generator=FixedResolutionBuffer, - plot_type="OffAxisSlice", - ) - if axes_unit is not None: - pw.set_axes_unit(axes_unit) - pw._setup_plots() - return pw - - def to_frb(self, width, resolution, height=None, periodic=False): - r"""This function returns a FixedResolutionBuffer generated from this - object. - - An ObliqueFixedResolutionBuffer is an object that accepts a - variable-resolution 2D object and transforms it into an NxM bitmap that - can be plotted, examined or processed. This is a convenience function - to return an FRB directly from an existing 2D data object. Unlike the - corresponding to_frb function for other YTSelectionContainer2D objects, - this does not accept a 'center' parameter as it is assumed to be - centered at the center of the cutting plane. - - Parameters - ---------- - width : width specifier - This can either be a floating point value, in the native domain - units of the simulation, or a tuple of the (value, unit) style. - This will be the width of the FRB. - height : height specifier, optional - This will be the height of the FRB, by default it is equal to width. - resolution : int or tuple of ints - The number of pixels on a side of the final FRB. - periodic : boolean - This can be true or false, and governs whether the pixelization - will span the domain boundaries. - - Returns - ------- - frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer` - A fixed resolution buffer, which can be queried for fields. 
- - Examples - -------- - - >>> v, c = ds.find_max("density") - >>> sp = ds.sphere(c, (100.0, 'au')) - >>> L = sp.quantities.angular_momentum_vector() - >>> cutting = ds.cutting(L, c) - >>> frb = cutting.to_frb( (1.0, 'pc'), 1024) - >>> write_image(np.log10(frb["Density"]), 'density_1pc.png') - """ - if iterable(width): - validate_width_tuple(width) - width = self.ds.quan(width[0], width[1]) - if height is None: - height = width - elif iterable(height): - validate_width_tuple(height) - height = self.ds.quan(height[0], height[1]) - if not iterable(resolution): - resolution = (resolution, resolution) - from yt.visualization.fixed_resolution import FixedResolutionBuffer - - bounds = (-width / 2.0, width / 2.0, -height / 2.0, height / 2.0) - frb = FixedResolutionBuffer(self, bounds, resolution, periodic=periodic) - return frb - - -class YTDisk(YTSelectionContainer3D): - """ - By providing a *center*, a *normal*, a *radius* and a *height* we - can define a cylinder of any proportion. Only cells whose centers are - within the cylinder will be selected. - - Parameters - ---------- - center : array_like - coordinate to which the normal, radius, and height all reference - normal : array_like - the normal vector defining the direction of lengthwise part of the - cylinder - radius : float - the radius of the cylinder - height : float - the distance from the midplane of the cylinder to the top and - bottom planes - fields : array of fields, optional - any fields to be pre-loaded in the cylinder object - ds: ~yt.data_objects.static_output.Dataset, optional - An optional dataset to use rather than self.ds - field_parameters : dictionary - A dictionary of field parameters than can be accessed by derived - fields. - data_source: optional - Draw the selection from the provided data source rather than - all data associated with the data_set - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> c = [0.5,0.5,0.5] - >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc')) - """ - - _type_name = "disk" - _con_args = ("center", "_norm_vec", "radius", "height") - - def __init__( - self, - center, - normal, - radius, - height, - fields=None, - ds=None, - field_parameters=None, - data_source=None, - ): - validate_center(center) - validate_3d_array(normal) - validate_float(radius) - validate_float(height) - validate_iterable(fields) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) - self._norm_vec = np.array(normal) / np.sqrt(np.dot(normal, normal)) - self.set_field_parameter("normal", self._norm_vec) - self.set_field_parameter("center", self.center) - self.height = fix_length(height, self.ds) - self.radius = fix_length(radius, self.ds) - self._d = -1.0 * np.dot(self._norm_vec, self.center) - - def _get_bbox(self): - """ - Return the minimum bounding box for the disk. - """ - # http://www.iquilezles.org/www/articles/diskbbox/diskbbox.htm - pa = self.center + self._norm_vec * self.height - pb = self.center - self._norm_vec * self.height - a = pa - pb - db = self.radius * np.sqrt(1.0 - a.d * a.d / np.dot(a, a)) - return np.minimum(pa - db, pb - db), np.maximum(pa + db, pb + db) - - -class YTRegion(YTSelectionContainer3D): - """A 3D region of data with an arbitrary center. - - Takes an array of three *left_edge* coordinates, three - *right_edge* coordinates, and a *center* that can be anywhere - in the domain. 
If the selected region extends past the edges - of the domain, no data will be found there, though the - object's `left_edge` or `right_edge` are not modified. - - Parameters - ---------- - center : array_like - The center of the region - left_edge : array_like - The left edge of the region - right_edge : array_like - The right edge of the region - """ - - _type_name = "region" - _con_args = ("center", "left_edge", "right_edge") - - def __init__( - self, - center, - left_edge, - right_edge, - fields=None, - ds=None, - field_parameters=None, - data_source=None, - ): - if center is not None: - validate_center(center) - validate_3d_array(left_edge) - validate_3d_array(right_edge) - validate_iterable(fields) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) - if not isinstance(left_edge, YTArray): - self.left_edge = self.ds.arr(left_edge, "code_length", dtype="float64") - else: - # need to assign this dataset's unit registry to the YTArray - self.left_edge = self.ds.arr(left_edge.copy(), dtype="float64") - if not isinstance(right_edge, YTArray): - self.right_edge = self.ds.arr(right_edge, "code_length", dtype="float64") - else: - # need to assign this dataset's unit registry to the YTArray - self.right_edge = self.ds.arr(right_edge.copy(), dtype="float64") - - def _get_bbox(self): - """ - Return the minimum bounding box for the region. - """ - return self.left_edge, self.right_edge - - -class YTDataCollection(YTSelectionContainer3D): - """ - By selecting an arbitrary *object_list*, we can act on those grids. - Child cells are not returned. - """ - - _type_name = "data_collection" - _con_args = ("_obj_list",) - - def __init__( - self, obj_list, ds=None, field_parameters=None, data_source=None, center=None - ): - validate_iterable(obj_list) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - if center is not None: - validate_center(center) - YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) - self._obj_ids = np.array([o.id - o._id_offset for o in obj_list], dtype="int64") - self._obj_list = obj_list - - -class YTSphere(YTSelectionContainer3D): - """ - A sphere of points defined by a *center* and a *radius*. - - Parameters - ---------- - center : array_like - The center of the sphere. - radius : float, width specifier, or YTQuantity - The radius of the sphere. If passed a float, - that will be interpreted in code units. Also - accepts a (radius, unit) tuple or YTQuantity - instance with units attached. 
- - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> c = [0.5,0.5,0.5] - >>> sphere = ds.sphere(c, (1., "kpc")) - """ - - _type_name = "sphere" - _con_args = ("center", "radius") - - def __init__( - self, center, radius, ds=None, field_parameters=None, data_source=None - ): - validate_center(center) - validate_float(radius) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - super(YTSphere, self).__init__(center, ds, field_parameters, data_source) - # Unpack the radius, if necessary - radius = fix_length(radius, self.ds) - if radius < self.index.get_smallest_dx(): - raise YTSphereTooSmall( - ds, - radius.in_units("code_length"), - self.index.get_smallest_dx().in_units("code_length"), - ) - self.set_field_parameter("radius", radius) - self.set_field_parameter("center", self.center) - self.radius = radius - - def _get_bbox(self): - """ - Return the minimum bounding box for the sphere. - """ - return -self.radius + self.center, self.radius + self.center - - -class YTMinimalSphere(YTSelectionContainer3D): - """ - Build the smallest sphere that encompasses a set of points. - - Parameters - ---------- - points : YTArray - The points that the sphere will contain. - - Examples - -------- - - >>> import yt - >>> ds = yt.load("output_00080/info_00080.txt") - >>> points = ds.r['particle_position'] - >>> sphere = ds.minimal_sphere(points) - """ - - _type_name = "sphere" - _override_selector_name = "minimal_sphere" - _con_args = ("center", "radius") - - def __init__(self, points, ds=None, field_parameters=None, data_source=None): - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - validate_object(points, YTArray) - - points = fix_length(points, ds) - if len(points) < 2: - raise YTException( - "Not enough points. Expected at least 2, got %s" % len(points) - ) - mylog.debug("Building minimal sphere around points.") - mb = _miniball.Miniball(points) - if not mb.is_valid(): - raise YTException("Could not build valid sphere around points.") - - center = ds.arr(mb.center(), points.units) - radius = ds.quan(np.sqrt(mb.squared_radius()), points.units) - super(YTMinimalSphere, self).__init__(center, ds, field_parameters, data_source) - self.set_field_parameter("radius", radius) - self.set_field_parameter("center", self.center) - self.radius = radius - - -class YTEllipsoid(YTSelectionContainer3D): - """ - By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we - can define a ellipsoid of any proportion. Only cells whose - centers are within the ellipsoid will be selected. - - Parameters - ---------- - center : array_like - The center of the ellipsoid. - A : float - The magnitude of the largest axis (semi-major) of the ellipsoid. - B : float - The magnitude of the medium axis (semi-medium) of the ellipsoid. - C : float - The magnitude of the smallest axis (semi-minor) of the ellipsoid. - e0 : array_like (automatically normalized) - the direction of the largest semi-major axis of the ellipsoid - tilt : float - After the rotation about the z-axis to allign e0 to x in the x-y - plane, and then rotating about the y-axis to align e0 completely - to the x-axis, tilt is the angle in radians remaining to - rotate about the x-axis to align both e1 to the y-axis and e2 to - the z-axis. 
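# --- Editor's sketch (illustrative only, not part of the original patch) ---
# The tilt convention described above can be checked in the degenerate case:
# with the semi-major axis e0 already along x and tilt = 0, the derived axes
# come out as e1 = y-hat and e2 = z-hat.  `rot` below is a stand-in
# (assumption) for yt.utilities.math_utils.get_rotation_matrix, built from
# the Rodrigues formula.
import numpy as np

def rot(theta, axis):
    a = np.asarray(axis, dtype="float64"); a = a / np.linalg.norm(a)
    K = np.array([[0.0, -a[2], a[1]], [a[2], 0.0, -a[0]], [-a[1], a[0], 0.0]])
    return np.eye(3) + np.sin(theta) * K + (1.0 - np.cos(theta)) * (K @ K)

e0 = np.array([1.0, 0.0, 0.0]); tilt = 0.0
t1 = np.arctan(e0[1] / e0[0])          # angle about z that aligns e0 with x
r1 = rot(t1, (0, 0, 1)).T @ e0
t2 = np.arctan(-r1[2] / r1[0])         # angle about y that finishes the alignment
# e1 is y-hat tilted about x in the aligned frame, rotated back to the original frame
e1 = rot(t1, (0, 0, 1)) @ rot(t2, (0, 1, 0)) @ rot(tilt, (1, 0, 0)) @ np.array([0.0, 1.0, 0.0])
e2 = np.cross(e0, e1)                  # here e1 == y-hat and e2 == z-hat
# --- end of editor's sketch ---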
- Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> c = [0.5,0.5,0.5] - >>> ell = ds.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2) - """ - - _type_name = "ellipsoid" - _con_args = ("center", "_A", "_B", "_C", "_e0", "_tilt") - - def __init__( - self, - center, - A, - B, - C, - e0, - tilt, - fields=None, - ds=None, - field_parameters=None, - data_source=None, - ): - validate_center(center) - validate_float(A) - validate_float(B) - validate_float(C) - validate_3d_array(e0) - validate_float(tilt) - validate_iterable(fields) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) - # make sure the magnitudes of semi-major axes are in order - if A < B or B < C: - raise YTEllipsoidOrdering(ds, A, B, C) - # make sure the smallest side is not smaller than dx - self._A = self.ds.quan(A, "code_length") - self._B = self.ds.quan(B, "code_length") - self._C = self.ds.quan(C, "code_length") - if self._C < self.index.get_smallest_dx(): - raise YTSphereTooSmall(self.ds, self._C, self.index.get_smallest_dx()) - self._e0 = e0 = e0 / (e0 ** 2.0).sum() ** 0.5 - self._tilt = tilt - - # find the t1 angle needed to rotate about z axis to align e0 to x - t1 = np.arctan(e0[1] / e0[0]) - # rotate e0 by -t1 - RZ = get_rotation_matrix(t1, (0, 0, 1)).transpose() - r1 = (e0 * RZ).sum(axis=1) - # find the t2 angle needed to rotate about y axis to align e0 to x - t2 = np.arctan(-r1[2] / r1[0]) - """ - calculate the original e1 - given the tilt about the x axis when e0 was aligned - to x after t1, t2 rotations about z, y - """ - RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose() - RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose() - RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose() - e1 = ((0, 1, 0) * RX).sum(axis=1) - e1 = (e1 * RY).sum(axis=1) - e1 = (e1 * RZ).sum(axis=1) - e2 = np.cross(e0, e1) - - self._e1 = e1 - self._e2 = e2 - - self.set_field_parameter("A", A) - self.set_field_parameter("B", B) - self.set_field_parameter("C", C) - self.set_field_parameter("e0", e0) - self.set_field_parameter("e1", e1) - self.set_field_parameter("e2", e2) - - def _get_bbox(self): - """ - Get the bounding box for the ellipsoid. NOTE that in this case - it is not the *minimum* bounding box. - """ - radius = self.ds.arr(np.max([self._A, self._B, self._C]), "code_length") - return -radius + self.center, radius + self.center - - -class YTCutRegion(YTSelectionContainer3D): - """ - This is a data object designed to allow individuals to apply logical - operations to fields and filter as a result of those cuts. - - Parameters - ---------- - data_source : YTSelectionContainer3D - The object to which cuts will be applied. - conditionals : list of strings - A list of conditionals that will be evaluated. In the namespace - available, these conditionals will have access to 'obj' which is a data - object of unknown shape, and they must generate a boolean array. 
For - instance, conditionals = ["obj['temperature'] < 1e3"] - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> sp = ds.sphere("max", (1.0, 'Mpc')) - >>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"]) - """ - - _type_name = "cut_region" - _con_args = ("base_object", "conditionals") - - def __init__( - self, - data_source, - conditionals, - ds=None, - field_parameters=None, - base_object=None, - locals=None, - ): - if locals is None: - locals = {} - validate_object(data_source, YTSelectionContainer) - validate_iterable(conditionals) - for condition in conditionals: - validate_object(condition, str) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(base_object, YTSelectionContainer) - if base_object is not None: - # passing base_object explicitly has been deprecated, - # but we handle it here for backward compatibility - if data_source is not None: - raise RuntimeError("Cannot use both base_object and data_source") - data_source = base_object - - self.conditionals = ensure_list(conditionals) - if isinstance(data_source, YTCutRegion): - # If the source is also a cut region, add its conditionals - # and set the source to be its source. - # Preserve order of conditionals. - self.conditionals = data_source.conditionals + self.conditionals - data_source = data_source.base_object - - super(YTCutRegion, self).__init__( - data_source.center, ds, field_parameters, data_source=data_source - ) - self.base_object = data_source - self.locals = locals - self._selector = None - # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth, - # ires and get_data - - def chunks(self, fields, chunking_style, **kwargs): - # We actually want to chunk the sub-chunk, not ourselves. We have no - # chunks to speak of, as we do not data IO. - for chunk in self.index._chunk(self.base_object, chunking_style, **kwargs): - with self.base_object._chunked_read(chunk): - with self._chunked_read(chunk): - self.get_data(fields) - yield self - - def get_data(self, fields=None): - fields = ensure_list(fields) - self.base_object.get_data(fields) - ind = self._cond_ind - for field in fields: - f = self.base_object[field] - if f.shape != ind.shape: - parent = getattr(self, "parent", self.base_object) - self.field_data[field] = parent[field][self._part_ind(field[0])] - else: - self.field_data[field] = self.base_object[field][ind] - - @property - def blocks(self): - # We have to take a slightly different approach here. Note that all - # that .blocks has to yield is a 3D array and a mask. - for obj, m in self.base_object.blocks: - m = m.copy() - with obj._field_parameter_state(self.field_parameters): - for cond in self.conditionals: - ss = eval(cond) - m = np.logical_and(m, ss, m) - if not np.any(m): - continue - yield obj, m - - @property - def _cond_ind(self): - ind = None - obj = self.base_object - locals = self.locals.copy() - if "obj" in locals: - raise RuntimeError( - '"obj" has been defined in the "locals" ; this is not supported, please rename the variable.' 
- ) - locals["obj"] = obj - with obj._field_parameter_state(self.field_parameters): - for cond in self.conditionals: - res = eval(cond, locals) - if ind is None: - ind = res - if ind.shape != res.shape: - raise YTIllDefinedCutRegion(self.conditionals) - np.logical_and(res, ind, ind) - return ind - - def _part_ind_KDTree(self, ptype): - """Find the particles in cells using a KDTree approach.""" - parent = getattr(self, "parent", self.base_object) - units = "code_length" - - pos = np.stack( - [ - self[("index", "x")].to(units), - self[("index", "y")].to(units), - self[("index", "z")].to(units), - ], - axis=1, - ).value - dx = np.stack( - [ - self[("index", "dx")].to(units), - self[("index", "dy")].to(units), - self[("index", "dz")].to(units), - ], - axis=1, - ).value - ppos = np.stack( - [ - parent[(ptype, "particle_position_x")], - parent[(ptype, "particle_position_y")], - parent[(ptype, "particle_position_z")], - ], - axis=1, - ).value - levels = self[("index", "grid_level")].astype("int32").value - levelmin = levels.min() - levelmax = levels.max() - - mask = np.zeros(ppos.shape[0], dtype=bool) - - for lvl in range(levelmax, levelmin - 1, -1): - # Filter out cells not in the current level - lvl_mask = levels == lvl - dx_loc = dx[lvl_mask] - pos_loc = pos[lvl_mask] - - grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1) - - # Compute closest cell for all remaining particles - dist, icell = grid_tree.query( - ppos[~mask], distance_upper_bound=dx_loc.max(), p=np.inf - ) - mask_loc = np.isfinite(dist[:]) - - # Check that particles within dx of a cell are in it - i = icell[mask_loc] - dist = np.abs(ppos[~mask][mask_loc, :] - pos_loc[i]) - tmp_mask = np.all(dist <= (dx_loc[i] / 2), axis=1) - - mask_loc[mask_loc] = tmp_mask - - # Update the particle mask with particles found at this level - mask[~mask] |= mask_loc - - return mask - - def _part_ind_brute_force(self, ptype): - parent = getattr(self, "parent", self.base_object) - units = "code_length" - mask = points_in_cells( - self[("index", "x")].to(units), - self[("index", "y")].to(units), - self[("index", "z")].to(units), - self[("index", "dx")].to(units), - self[("index", "dy")].to(units), - self[("index", "dz")].to(units), - parent[(ptype, "particle_position_x")].to(units), - parent[(ptype, "particle_position_y")].to(units), - parent[(ptype, "particle_position_z")].to(units), - ) - - return mask - - def _part_ind(self, ptype): - # If scipy is installed, use the fast KD tree - # implementation. Else, fall back onto the direct - # brute-force algorithm. - try: - _scipy.spatial.KDTree - return self._part_ind_KDTree(ptype) - except ImportError: - return self._part_ind_brute_force(ptype) - - @property - def icoords(self): - return self.base_object.icoords[self._cond_ind, :] - - @property - def fcoords(self): - return self.base_object.fcoords[self._cond_ind, :] - - @property - def ires(self): - return self.base_object.ires[self._cond_ind] - - @property - def fwidth(self): - return self.base_object.fwidth[self._cond_ind, :] - - def _get_bbox(self): - """ - Get the bounding box for the cut region. Here we just use - the bounding box for the source region. - """ - return self.base_object._get_bbox() - - -class YTIntersectionContainer3D(YTSelectionContainer3D): - """ - This is a more efficient method of selecting the intersection of multiple - data selection objects. 
- - Creating one of these objects returns the intersection of all of the - sub-objects; it is designed to be a faster method than chaining & ("and") - operations to create a single, large intersection. - - Parameters - ---------- - data_objects : Iterable of YTSelectionContainer - The data objects to intersect - - Examples - -------- - - >>> import yt - >>> ds = yt.load("RedshiftOutput0005") - >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.15) - >>> sp2 = ds.sphere((0.38, 0.51, 0.55), 0.1) - >>> sp3 = ds.sphere((0.35, 0.5, 0.6), 0.15) - >>> new_obj = ds.intersection((sp1, sp2, sp3)) - >>> print(new_obj.sum("cell_volume")) - """ - - _type_name = "intersection" - _con_args = ("data_objects",) - - def __init__(self, data_objects, ds=None, field_parameters=None, data_source=None): - validate_iterable(data_objects) - for obj in data_objects: - validate_object(obj, YTSelectionContainer) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - YTSelectionContainer3D.__init__(self, None, ds, field_parameters, data_source) - # ensure_list doesn't check for tuples - if isinstance(data_objects, tuple): - data_objects = list(data_objects) - self.data_objects = ensure_list(data_objects) - - -class YTDataObjectUnion(YTSelectionContainer3D): - """ - This is a more efficient method of selecting the union of multiple - data selection objects. - - Creating one of these objects returns the union of all of the sub-objects; - it is designed to be a faster method than chaining | (or) operations to - create a single, large union. - - Parameters - ---------- - data_objects : Iterable of YTSelectionContainer - The data objects to union - - Examples - -------- - - >>> import yt - >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") - >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1) - >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1) - >>> sp3 = ds.sphere((0.5, 0.5, 0.9), 0.1) - >>> new_obj = ds.union((sp1, sp2, sp3)) - >>> print(new_obj.sum("cell_volume")) - """ - - _type_name = "union" - _con_args = ("data_objects",) - - def __init__(self, data_objects, ds=None, field_parameters=None, data_source=None): - validate_iterable(data_objects) - for obj in data_objects: - validate_object(obj, YTSelectionContainer) - validate_object(ds, Dataset) - validate_object(field_parameters, dict) - validate_object(data_source, YTSelectionContainer) - YTSelectionContainer3D.__init__(self, None, ds, field_parameters, data_source) - # ensure_list doesn't check for tuples - if isinstance(data_objects, tuple): - data_objects = list(data_objects) - self.data_objects = ensure_list(data_objects) diff --git a/yt/data_objects/selection_objects/__init__.py b/yt/data_objects/selection_objects/__init__.py new file mode 100644 index 00000000000..dbbd7c41ae9 --- /dev/null +++ b/yt/data_objects/selection_objects/__init__.py @@ -0,0 +1,13 @@ +from .boolean_operations import ( + YTBooleanContainer, + YTDataObjectUnion, + YTIntersectionContainer3D, +) +from .cut_region import YTCutRegion +from .disk import YTDisk +from .object_collection import YTDataCollection +from .point import YTPoint +from .ray import YTOrthoRay, YTRay +from .region import YTRegion +from .slices import YTCuttingPlane, YTSlice +from .spheroids import YTEllipsoid, YTMinimalSphere, YTSphere diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/base_objects.py new file mode 100644 index 00000000000..5eb82b2fd96 --- /dev/null +++ b/yt/data_objects/selection_objects/base_objects.py @@ 
-0,0 +1,1411 @@ +import itertools +import uuid +from collections import defaultdict +from contextlib import contextmanager + +import numpy as np +from unyt.exceptions import UnitConversionError, UnitParseError + +import yt.geometry +from yt import YTArray, iterable +from yt.data_objects.data_containers import YTDataContainer +from yt.data_objects.derived_quantities import DerivedQuantityCollection +from yt.data_objects.field_data import YTFieldData +from yt.data_objects.selection_objects.boolean_operations import YTBooleanContainer +from yt.fields.field_exceptions import NeedsGridType +from yt.funcs import ensure_list, fix_axis, validate_width_tuple +from yt.geometry.selection_routines import compose_selector +from yt.units import dimensions as ytdims +from yt.utilities.exceptions import ( + YTBooleanObjectError, + YTBooleanObjectsWrongDataset, + YTDataSelectorNotImplemented, + YTDimensionalityError, + YTFieldUnitError, + YTFieldUnitParseError, + YTGenerationInProgress, +) +from yt.utilities.lib.marching_cubes import march_cubes_grid, march_cubes_grid_flux +from yt.utilities.logger import ytLogger as mylog +from yt.utilities.parallel_tools.parallel_analysis_interface import ( + ParallelAnalysisInterface, +) + + +class YTSelectionContainer(YTDataContainer, ParallelAnalysisInterface): + _locked = False + _sort_by = None + _selector = None + _current_chunk = None + _data_source = None + _dimensionality = None + _max_level = None + _min_level = None + + def __init__(self, ds, field_parameters, data_source=None): + ParallelAnalysisInterface.__init__(self) + super(YTSelectionContainer, self).__init__(ds, field_parameters) + self._data_source = data_source + if data_source is not None: + if data_source.ds != self.ds: + raise RuntimeError( + "Attempted to construct a DataContainer with a data_source " + "from a different Dataset", + ds, + data_source.ds, + ) + if data_source._dimensionality < self._dimensionality: + raise RuntimeError( + "Attempted to construct a DataContainer with a data_source " + "of lower dimensionality (%u vs %u)" + % (data_source._dimensionality, self._dimensionality) + ) + self.field_parameters.update(data_source.field_parameters) + self.quantities = DerivedQuantityCollection(self) + + @property + def selector(self): + if self._selector is not None: + return self._selector + s_module = getattr(self, "_selector_module", yt.geometry.selection_routines) + sclass = getattr(s_module, "%s_selector" % self._type_name, None) + if sclass is None: + raise YTDataSelectorNotImplemented(self._type_name) + + if self._data_source is not None: + self._selector = compose_selector( + self, self._data_source.selector, sclass(self) + ) + else: + self._selector = sclass(self) + return self._selector + + def chunks(self, fields, chunking_style, **kwargs): + # This is an iterator that will yield the necessary chunks. + self.get_data() # Ensure we have built ourselves + if fields is None: + fields = [] + # chunk_ind can be supplied in the keyword arguments. If it's a + # scalar, that'll be the only chunk that gets returned; if it's a list, + # those are the ones that will be. 
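# --- Editor's sketch (illustrative only, not part of the original patch) ---
# Intended use of the chunk_ind keyword described in the comment above,
# assuming an "io" chunking style and an already-loaded dataset `ds`
# (names and field choices here are illustrative only):
#
#     ad = ds.all_data()
#     for chunk in ad.chunks([("gas", "density")], "io", chunk_ind=[0, 2]):
#         # only the first and third io-chunks are yielded
#         print(chunk[("gas", "density")].size)
# --- end of editor's sketch ---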
+ chunk_ind = kwargs.pop("chunk_ind", None) + if chunk_ind is not None: + chunk_ind = ensure_list(chunk_ind) + for ci, chunk in enumerate(self.index._chunk(self, chunking_style, **kwargs)): + if chunk_ind is not None and ci not in chunk_ind: + continue + with self._chunked_read(chunk): + self.get_data(fields) + # NOTE: we yield before releasing the context + yield self + + def _identify_dependencies(self, fields_to_get, spatial=False): + inspected = 0 + fields_to_get = fields_to_get[:] + for field in itertools.cycle(fields_to_get): + if inspected >= len(fields_to_get): + break + inspected += 1 + fi = self.ds._get_field_info(*field) + fd = self.ds.field_dependencies.get( + field, None + ) or self.ds.field_dependencies.get(field[1], None) + # This is long overdue. Any time we *can't* find a field + # dependency -- for instance, if the derived field has been added + # after dataset instantiation -- let's just try to + # recalculate it. + if fd is None: + try: + fd = fi.get_dependencies(ds=self.ds) + self.ds.field_dependencies[field] = fd + except Exception: + continue + requested = self._determine_fields(list(set(fd.requested))) + deps = [d for d in requested if d not in fields_to_get] + fields_to_get += deps + return sorted(fields_to_get) + + def get_data(self, fields=None): + if self._current_chunk is None: + self.index._identify_base_chunk(self) + if fields is None: + return + nfields = [] + apply_fields = defaultdict(list) + for field in self._determine_fields(fields): + # We need to create the field on the raw particle types + # for particles types (when the field is not directly + # defined for the derived particle type only) + finfo = self.ds.field_info[field] + + if ( + field[0] in self.ds.filtered_particle_types + and finfo._inherited_particle_filter + ): + f = self.ds.known_filters[field[0]] + apply_fields[field[0]].append((f.filtered_type, field[1])) + else: + nfields.append(field) + for filter_type in apply_fields: + f = self.ds.known_filters[filter_type] + with f.apply(self): + self.get_data(apply_fields[filter_type]) + fields = nfields + if len(fields) == 0: + return + # Now we collect all our fields + # Here is where we need to perform a validation step, so that if we + # have a field requested that we actually *can't* yet get, we put it + # off until the end. This prevents double-reading fields that will + # need to be used in spatial fields later on. + fields_to_get = [] + # This will be pre-populated with spatial fields + fields_to_generate = [] + for field in self._determine_fields(fields): + if field in self.field_data: + continue + finfo = self.ds._get_field_info(*field) + try: + finfo.check_available(self) + except NeedsGridType: + fields_to_generate.append(field) + continue + fields_to_get.append(field) + if len(fields_to_get) == 0 and len(fields_to_generate) == 0: + return + elif self._locked: + raise YTGenerationInProgress(fields) + # Track which ones we want in the end + ofields = set(list(self.field_data.keys()) + fields_to_get + fields_to_generate) + # At this point, we want to figure out *all* our dependencies. 
+ fields_to_get = self._identify_dependencies(fields_to_get, self._spatial) + # We now split up into readers for the types of fields + fluids, particles = [], [] + finfos = {} + for ftype, fname in fields_to_get: + finfo = self.ds._get_field_info(ftype, fname) + finfos[ftype, fname] = finfo + if finfo.sampling_type == "particle": + particles.append((ftype, fname)) + elif (ftype, fname) not in fluids: + fluids.append((ftype, fname)) + # The _read method will figure out which fields it needs to get from + # disk, and return a dict of those fields along with the fields that + # need to be generated. + read_fluids, gen_fluids = self.index._read_fluid_fields( + fluids, self, self._current_chunk + ) + for f, v in read_fluids.items(): + self.field_data[f] = self.ds.arr(v, units=finfos[f].units) + self.field_data[f].convert_to_units(finfos[f].output_units) + + read_particles, gen_particles = self.index._read_particle_fields( + particles, self, self._current_chunk + ) + + for f, v in read_particles.items(): + self.field_data[f] = self.ds.arr(v, units=finfos[f].units) + self.field_data[f].convert_to_units(finfos[f].output_units) + + fields_to_generate += gen_fluids + gen_particles + self._generate_fields(fields_to_generate) + for field in list(self.field_data.keys()): + if field not in ofields: + self.field_data.pop(field) + + def _generate_fields(self, fields_to_generate): + index = 0 + with self._field_lock(): + # At this point, we assume that any fields that are necessary to + # *generate* a field are in fact already available to us. Note + # that we do not make any assumption about whether or not the + # fields have a spatial requirement. This will be checked inside + # _generate_field, at which point additional dependencies may + # actually be noted. + while any(f not in self.field_data for f in fields_to_generate): + field = fields_to_generate[index % len(fields_to_generate)] + index += 1 + if field in self.field_data: + continue + fi = self.ds._get_field_info(*field) + try: + fd = self._generate_field(field) + if hasattr(fd, "units"): + fd.units.registry = self.ds.unit_registry + if fd is None: + raise RuntimeError + if fi.units is None: + # first time calling a field with units='auto', so we + # infer the units from the units of the data we get back + # from the field function and use these units for future + # field accesses + units = getattr(fd, "units", "") + if units == "": + dimensions = ytdims.dimensionless + else: + dimensions = units.dimensions + units = str( + units.get_base_equivalent(self.ds.unit_system.name) + ) + if fi.dimensions != dimensions: + raise YTDimensionalityError(fi.dimensions, dimensions) + fi.units = units + self.field_data[field] = self.ds.arr(fd, units) + msg = ( + "Field %s was added without specifying units, " + "assuming units are %s" + ) + mylog.warning(msg % (fi.name, units)) + try: + fd.convert_to_units(fi.units) + except AttributeError: + # If the field returns an ndarray, coerce to a + # dimensionless YTArray and verify that field is + # supposed to be unitless + fd = self.ds.arr(fd, "") + if fi.units != "": + raise YTFieldUnitError(fi, fd.units) + except UnitConversionError: + raise YTFieldUnitError(fi, fd.units) + except UnitParseError: + raise YTFieldUnitParseError(fi) + self.field_data[field] = fd + except YTGenerationInProgress as gip: + for f in gip.fields: + if f not in fields_to_generate: + fields_to_generate.append(f) + + def __or__(self, other): + if not isinstance(other, YTSelectionContainer): + raise YTBooleanObjectError(other) + if self.ds is 
not other.ds: + raise YTBooleanObjectsWrongDataset() + # Should maybe do something with field parameters here + return YTBooleanContainer("OR", self, other, ds=self.ds) + + def __invert__(self): + # ~obj + asel = yt.geometry.selection_routines.AlwaysSelector(self.ds) + return YTBooleanContainer("NOT", self, asel, ds=self.ds) + + def __xor__(self, other): + if not isinstance(other, YTSelectionContainer): + raise YTBooleanObjectError(other) + if self.ds is not other.ds: + raise YTBooleanObjectsWrongDataset() + return YTBooleanContainer("XOR", self, other, ds=self.ds) + + def __and__(self, other): + if not isinstance(other, YTSelectionContainer): + raise YTBooleanObjectError(other) + if self.ds is not other.ds: + raise YTBooleanObjectsWrongDataset() + return YTBooleanContainer("AND", self, other, ds=self.ds) + + def __add__(self, other): + return self.__or__(other) + + def __sub__(self, other): + if not isinstance(other, YTSelectionContainer): + raise YTBooleanObjectError(other) + if self.ds is not other.ds: + raise YTBooleanObjectsWrongDataset() + return YTBooleanContainer("NEG", self, other, ds=self.ds) + + @contextmanager + def _field_lock(self): + self._locked = True + yield + self._locked = False + + @contextmanager + def _ds_hold(self, new_ds): + """ + This contextmanager is used to take a data object and preserve its + attributes but allow the dataset that underlies it to be swapped out. + This is typically only used internally, and differences in unit systems + may present interesting possibilities. + """ + old_ds = self.ds + old_index = self._index + self.ds = new_ds + self._index = new_ds.index + old_chunk_info = self._chunk_info + old_chunk = self._current_chunk + old_size = self.size + self._chunk_info = None + self._current_chunk = None + self.size = None + self._index._identify_base_chunk(self) + with self._chunked_read(None): + yield + self._index = old_index + self.ds = old_ds + self._chunk_info = old_chunk_info + self._current_chunk = old_chunk + self.size = old_size + + @contextmanager + def _chunked_read(self, chunk): + # There are several items that need to be swapped out + # field_data, size, shape + obj_field_data = [] + if hasattr(chunk, "objs"): + for obj in chunk.objs: + obj_field_data.append(obj.field_data) + obj.field_data = YTFieldData() + old_field_data, self.field_data = self.field_data, YTFieldData() + old_chunk, self._current_chunk = self._current_chunk, chunk + old_locked, self._locked = self._locked, False + yield + self.field_data = old_field_data + self._current_chunk = old_chunk + self._locked = old_locked + if hasattr(chunk, "objs"): + for obj in chunk.objs: + obj.field_data = obj_field_data.pop(0) + + @contextmanager + def _activate_cache(self): + cache = self._field_cache or {} + old_fields = {} + for field in (f for f in cache if f in self.field_data): + old_fields[field] = self.field_data[field] + self.field_data.update(cache) + yield + for field in cache: + self.field_data.pop(field) + if field in old_fields: + self.field_data[field] = old_fields.pop(field) + self._field_cache = None + + def _initialize_cache(self, cache): + # Wipe out what came before + self._field_cache = {} + self._field_cache.update(cache) + + @property + def icoords(self): + if self._current_chunk is None: + self.index._identify_base_chunk(self) + return self._current_chunk.icoords + + @property + def fcoords(self): + if self._current_chunk is None: + self.index._identify_base_chunk(self) + return self._current_chunk.fcoords + + @property + def ires(self): + if 
self._current_chunk is None: + self.index._identify_base_chunk(self) + return self._current_chunk.ires + + @property + def fwidth(self): + if self._current_chunk is None: + self.index._identify_base_chunk(self) + return self._current_chunk.fwidth + + @property + def fcoords_vertex(self): + if self._current_chunk is None: + self.index._identify_base_chunk(self) + return self._current_chunk.fcoords_vertex + + @property + def max_level(self): + if self._max_level is None: + try: + return self.ds.max_level + except AttributeError: + return None + return self._max_level + + @max_level.setter + def max_level(self, value): + if self._selector is not None: + del self._selector + self._selector = None + self._current_chunk = None + self.size = None + self.shape = None + self.field_data.clear() + self._max_level = value + + @property + def min_level(self): + if self._min_level is None: + try: + return 0 + except AttributeError: + return None + return self._min_level + + @min_level.setter + def min_level(self, value): + if self._selector is not None: + del self._selector + self._selector = None + self.field_data.clear() + self.size = None + self.shape = None + self._current_chunk = None + self._min_level = value + + +class YTSelectionContainer0D(YTSelectionContainer): + _spatial = False + _dimensionality = 0 + + def __init__(self, ds, field_parameters=None, data_source=None): + super(YTSelectionContainer0D, self).__init__(ds, field_parameters, data_source) + + +class YTSelectionContainer1D(YTSelectionContainer): + _spatial = False + _dimensionality = 1 + + def __init__(self, ds, field_parameters=None, data_source=None): + super(YTSelectionContainer1D, self).__init__(ds, field_parameters, data_source) + self._grids = None + self._sortkey = None + self._sorted = {} + + +class YTSelectionContainer2D(YTSelectionContainer): + _key_fields = ["px", "py", "pdx", "pdy"] + _dimensionality = 2 + """ + Prepares the YTSelectionContainer2D, normal to *axis*. If *axis* is 4, we are not + aligned with any axis. + """ + _spatial = False + + def __init__(self, axis, ds, field_parameters=None, data_source=None): + super(YTSelectionContainer2D, self).__init__(ds, field_parameters, data_source) + # We need the ds, which will exist by now, for fix_axis. + self.axis = fix_axis(axis, self.ds) + self.set_field_parameter("axis", axis) + + def _convert_field_name(self, field): + return field + + def _get_pw(self, fields, center, width, origin, plot_type): + from yt.visualization.fixed_resolution import FixedResolutionBuffer as frb + from yt.visualization.plot_window import PWViewerMPL, get_window_parameters + + axis = self.axis + skip = self._key_fields + skip += list(set(frb._exclude_fields).difference(set(self._key_fields))) + self.fields = [k for k in self.field_data if k not in skip] + if fields is not None: + self.fields = ensure_list(fields) + self.fields + if len(self.fields) == 0: + raise ValueError("No fields found to plot in get_pw") + (bounds, center, display_center) = get_window_parameters( + axis, center, width, self.ds + ) + pw = PWViewerMPL( + self, + bounds, + fields=self.fields, + origin=origin, + frb_generator=frb, + plot_type=plot_type, + ) + pw._setup_plots() + return pw + + def to_frb(self, width, resolution, center=None, height=None, periodic=False): + r"""This function returns a FixedResolutionBuffer generated from this + object. + + A FixedResolutionBuffer is an object that accepts a variable-resolution + 2D object and transforms it into an NxM bitmap that can be plotted, + examined or processed. 
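+ For instance (an illustrative call only, assuming ``proj`` is a
+ projection data object as in the example further below), a rectangular
+ 800x400 buffer with an explicit physical height might be requested as
+
+ >>> frb = proj.to_frb((50.0, 'kpc'), (800, 400), height=(25.0, 'kpc'))
+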
This is a convenience function to return an FRB + directly from an existing 2D data object. + + Parameters + ---------- + width : width specifier + This can either be a floating point value, in the native domain + units of the simulation, or a tuple of the (value, unit) style. + This will be the width of the FRB. + height : height specifier + This will be the physical height of the FRB, by default it is equal + to width. Note that this will not make any corrections to + resolution for the aspect ratio. + resolution : int or tuple of ints + The number of pixels on a side of the final FRB. If iterable, this + will be the width then the height. + center : array-like of floats, optional + The center of the FRB. If not specified, defaults to the center of + the current object. + periodic : bool + Should the returned Fixed Resolution Buffer be periodic? (default: + False). + + Returns + ------- + frb : :class:`~yt.visualization.fixed_resolution.FixedResolutionBuffer` + A fixed resolution buffer, which can be queried for fields. + + Examples + -------- + + >>> proj = ds.proj("Density", 0) + >>> frb = proj.to_frb( (100.0, 'kpc'), 1024) + >>> write_image(np.log10(frb["Density"]), 'density_100kpc.png') + """ + + if (self.ds.geometry == "cylindrical" and self.axis == 1) or ( + self.ds.geometry == "polar" and self.axis == 2 + ): + if center is not None and center != (0.0, 0.0): + raise NotImplementedError( + "Currently we only support images centered at R=0. " + + "We plan to generalize this in the near future" + ) + from yt.visualization.fixed_resolution import ( + CylindricalFixedResolutionBuffer, + ) + + validate_width_tuple(width) + if iterable(resolution): + resolution = max(resolution) + frb = CylindricalFixedResolutionBuffer(self, width, resolution) + return frb + + if center is None: + center = self.center + if center is None: + center = (self.ds.domain_right_edge + self.ds.domain_left_edge) / 2.0 + elif iterable(center) and not isinstance(center, YTArray): + center = self.ds.arr(center, "code_length") + if iterable(width): + w, u = width + if isinstance(w, tuple) and isinstance(u, tuple): + height = u + w, u = w + width = self.ds.quan(w, units=u) + elif not isinstance(width, YTArray): + width = self.ds.quan(width, "code_length") + if height is None: + height = width + elif iterable(height): + h, u = height + height = self.ds.quan(h, units=u) + elif not isinstance(height, YTArray): + height = self.ds.quan(height, "code_length") + if not iterable(resolution): + resolution = (resolution, resolution) + from yt.visualization.fixed_resolution import FixedResolutionBuffer + + xax = self.ds.coordinates.x_axis[self.axis] + yax = self.ds.coordinates.y_axis[self.axis] + bounds = ( + center[xax] - width * 0.5, + center[xax] + width * 0.5, + center[yax] - height * 0.5, + center[yax] + height * 0.5, + ) + frb = FixedResolutionBuffer(self, bounds, resolution, periodic=periodic) + return frb + + +class YTSelectionContainer3D(YTSelectionContainer): + """ + Returns an instance of YTSelectionContainer3D, or prepares one. Usually only + used as a base class. Note that *center* is supplied, but only used + for fields and quantities that require it. 
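+
+ Examples
+ --------
+ A minimal sketch using one of the yt sample datasets; a sphere is one
+ concrete 3D selection container:
+
+ >>> import yt
+ >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
+ >>> sp = ds.sphere("c", (10, "kpc"))
+ >>> print(sp["gas", "density"].max())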
+ """ + + _key_fields = ["x", "y", "z", "dx", "dy", "dz"] + _spatial = False + _num_ghost_zones = 0 + _dimensionality = 3 + + def __init__(self, center, ds, field_parameters=None, data_source=None): + super(YTSelectionContainer3D, self).__init__(ds, field_parameters, data_source) + self._set_center(center) + self.coords = None + self._grids = None + + def cut_region(self, field_cuts, field_parameters=None, locals=None): + """ + Return a YTCutRegion, where the a cell is identified as being inside + the cut region based on the value of one or more fields. Note that in + previous versions of yt the name 'grid' was used to represent the data + object used to construct the field cut, as of yt 3.0, this has been + changed to 'obj'. + + Parameters + ---------- + field_cuts : list of strings + A list of conditionals that will be evaluated. In the namespace + available, these conditionals will have access to 'obj' which is a + data object of unknown shape, and they must generate a boolean array. + For instance, conditionals = ["obj['temperature'] < 1e3"] + field_parameters : dictionary + A dictionary of field parameters to be used when applying the field + cuts. + locals : dictionary + A dictionary of local variables to use when defining the cut region. + + Examples + -------- + To find the total mass of hot gas with temperature greater than 10^6 K + in your volume: + + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.cut_region(["obj['temperature'] > 1e6"]) + >>> print(cr.quantities.total_quantity("cell_mass").in_units('Msun')) + """ + if locals is None: + locals = {} + cr = self.ds.cut_region( + self, field_cuts, field_parameters=field_parameters, locals=locals + ) + return cr + + def exclude_above(self, field, value, units=None): + """ + This function will return a YTCutRegion where all of the regions + whose field is above a given value are masked. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + value : float + The minimum value that will not be masked in the output + YTCutRegion. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field above the given value masked. + + Example + ------- + + To find the total mass of hot gas with temperature colder than 10^6 K + in your volume: + + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.exclude_above('temperature', 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + + """ + if units is None: + field_cuts = 'obj["' + field + '"] <= ' + str(value) + else: + field_cuts = ( + 'obj["' + field + '"].in_units("' + units + '") <= ' + str(value) + ) + cr = self.cut_region(field_cuts) + return cr + + def include_above(self, field, value, units=None): + """ + This function will return a YTCutRegion where only the regions + whose field is above a given value are included. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + value : float + The minimum value that will not be masked in the output + YTCutRegion. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field above the given value masked. 
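+
+ When *units* is given, this is roughly equivalent to building the cut
+ region by hand; for instance (field name, units and threshold here are
+ placeholders):
+
+ >>> ad = ds.all_data()
+ >>> cr = ad.cut_region(['obj["temperature"].in_units("K") > 1e6'])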
+ + Example + ------- + + To find the total mass of hot gas with temperature warmer than 10^6 K + in your volume: + + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.include_above('temperature', 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + + if units is None: + field_cuts = 'obj["' + field + '"] > ' + str(value) + else: + field_cuts = ( + 'obj["' + field + '"].in_units("' + units + '") > ' + str(value) + ) + cr = self.cut_region(field_cuts) + return cr + + def exclude_equal(self, field, value, units=None): + """ + This function will return a YTCutRegion where all of the regions + whose field are equal to given value are masked. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + value : float + The minimum value that will not be masked in the output + YTCutRegion. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field equal to the given value masked. + + Example + ------- + + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.exclude_equal('temperature', 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + if units is None: + field_cuts = 'obj["' + field + '"] != ' + str(value) + else: + field_cuts = ( + 'obj["' + field + '"].in_units("' + units + '") != ' + str(value) + ) + cr = self.cut_region(field_cuts) + return cr + + def include_equal(self, field, value, units=None): + """ + This function will return a YTCutRegion where only the regions + whose field are equal to given value are included. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + value : float + The minimum value that will not be masked in the output + YTCutRegion. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field equal to the given value included. + + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.include_equal('temperature', 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + if units is None: + field_cuts = 'obj["' + field + '"] == ' + str(value) + else: + field_cuts = ( + 'obj["' + field + '"].in_units("' + units + '") == ' + str(value) + ) + cr = self.cut_region(field_cuts) + return cr + + def exclude_inside(self, field, min_value, max_value, units=None): + """ + This function will return a YTCutRegion where all of the regions + whose field are inside the interval from min_value to max_value. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + min_value : float + The minimum value inside the interval to be excluded. + max_value : float + The maximum value inside the interval to be excluded. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field inside the given interval excluded. 
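+
+ Internally this builds a compound conditional, roughly equivalent to the
+ following (field name and bounds are placeholders):
+
+ >>> ad = ds.all_data()
+ >>> cr = ad.cut_region(["(obj['temperature'] <= 1e5) | (obj['temperature'] >= 1e6)"])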
+ + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.exclude_inside('temperature', 1e5, 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + if units is None: + field_cuts = ( + '(obj["' + + field + + '"] <= ' + + str(min_value) + + ') | (obj["' + + field + + '"] >= ' + + str(max_value) + + ")" + ) + else: + field_cuts = ( + '(obj["' + + field + + '"].in_units("' + + units + + '") <= ' + + str(min_value) + + ') | (obj["' + + field + + '"].in_units("' + + units + + '") >= ' + + str(max_value) + + ")" + ) + cr = self.cut_region(field_cuts) + return cr + + def include_inside(self, field, min_value, max_value, units=None): + """ + This function will return a YTCutRegion where only the regions + whose field are inside the interval from min_value to max_value are + included. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + min_value : float + The minimum value inside the interval to be excluded. + max_value : float + The maximum value inside the interval to be excluded. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field inside the given interval excluded. + + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.include_inside('temperature', 1e5, 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + if units is None: + field_cuts = ( + '(obj["' + + field + + '"] > ' + + str(min_value) + + ') & (obj["' + + field + + '"] < ' + + str(max_value) + + ")" + ) + else: + field_cuts = ( + '(obj["' + + field + + '"].in_units("' + + units + + '") > ' + + str(min_value) + + ') & (obj["' + + field + + '"].in_units("' + + units + + '") < ' + + str(max_value) + + ")" + ) + cr = self.cut_region(field_cuts) + return cr + + def exclude_outside(self, field, min_value, max_value, units=None): + """ + This function will return a YTCutRegion where all of the regions + whose field are outside the interval from min_value to max_value. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + min_value : float + The minimum value inside the interval to be excluded. + max_value : float + The maximum value inside the interval to be excluded. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field outside the given interval excluded. + + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.exclude_outside('temperature', 1e5, 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + cr = self.exclude_below(field, min_value, units) + cr = cr.exclude_above(field, max_value, units) + return cr + + def include_outside(self, field, min_value, max_value, units=None): + """ + This function will return a YTCutRegion where only the regions + whose field are outside the interval from min_value to max_value are + included. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + min_value : float + The minimum value inside the interval to be excluded. + max_value : float + The maximum value inside the interval to be excluded. + units : string or None + The units of the value threshold. 
None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field outside the given interval excluded. + + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.exclude_outside('temperature', 1e5, 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + cr = self.exclude_inside(field, min_value, max_value, units) + return cr + + def exclude_below(self, field, value, units=None): + """ + This function will return a YTCutRegion where all of the regions + whose field is below a given value are masked. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + value : float + The minimum value that will not be masked in the output + YTCutRegion. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the field below the given value masked. + + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.exclude_below('temperature', 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + if units is None: + field_cuts = 'obj["' + field + '"] >= ' + str(value) + else: + field_cuts = ( + 'obj["' + field + '"].in_units("' + units + '") >= ' + str(value) + ) + cr = self.cut_region(field_cuts) + return cr + + def exclude_nan(self, field, units=None): + """ + This function will return a YTCutRegion where all of the regions + whose field is NaN are masked. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + value : float + The minimum value that will not be masked in the output + YTCutRegion. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with the NaN entries of the field masked. + + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.exclude_nan('temperature') + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + if units is None: + field_cuts = '~np.isnan(obj["' + field + '"])' + else: + field_cuts = '~np.isnan(obj["' + field + '"].in_units("' + units + '"))' + cr = self.cut_region(field_cuts, locals={"np": np}) + return cr + + def include_below(self, field, value, units=None): + """ + This function will return a YTCutRegion where only the regions + whose field is below a given value are included. + + Parameters + ---------- + field : string + The field in which the conditional will be applied. + value : float + The minimum value that will not be masked in the output + YTCutRegion. + units : string or None + The units of the value threshold. None will use the default units + given in the field. + + Returns + ------- + cut_region : YTCutRegion + The YTCutRegion with only regions with the field below the given + value included. 
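+
+ Because the returned object is itself a YTCutRegion, such calls can be
+ chained; for instance (field name and values are placeholders), a
+ temperature band might be selected with
+
+ >>> ad = ds.all_data()
+ >>> cr = ad.include_above('temperature', 1e4).include_below('temperature', 1e6)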
+ + Example + ------- + >>> ds = yt.load("RedshiftOutput0005") + >>> ad = ds.all_data() + >>> cr = ad.include_below('temperature', 1e5, 1e6) + >>> print cr.quantities.total_quantity("cell_mass").in_units('Msun') + """ + if units is None: + field_cuts = 'obj["' + field + '"] < ' + str(value) + else: + field_cuts = ( + 'obj["' + field + '"].in_units("' + units + '") < ' + str(value) + ) + cr = self.cut_region(field_cuts) + return cr + + def extract_isocontours( + self, field, value, filename=None, rescale=False, sample_values=None + ): + r"""This identifies isocontours on a cell-by-cell basis, with no + consideration of global connectedness, and returns the vertices of the + Triangles in that isocontour. + + This function simply returns the vertices of all the triangles + calculated by the `marching cubes + `_ algorithm; for more + complex operations, such as identifying connected sets of cells above a + given threshold, see the extract_connected_sets function. This is more + useful for calculating, for instance, total isocontour area, or + visualizing in an external program (such as `MeshLab + `_.) + + Parameters + ---------- + field : string + Any field that can be obtained in a data object. This is the field + which will be isocontoured. + value : float + The value at which the isocontour should be calculated. + filename : string, optional + If supplied, this file will be filled with the vertices in .obj + format. Suitable for loading into meshlab. + rescale : bool, optional + If true, the vertices will be rescaled within their min/max. + sample_values : string, optional + Any field whose value should be extracted at the center of each + triangle. + + Returns + ------- + verts : array of floats + The array of vertices, x,y,z. Taken in threes, these are the + triangle vertices. + samples : array of floats + If `sample_values` is specified, this will be returned and will + contain the values of the field specified at the center of each + triangle. + + Examples + -------- + This will create a data object, find a nice value in the center, and + output the vertices to "triangles.obj" after rescaling them. + + >>> dd = ds.all_data() + >>> rho = dd.quantities["WeightedAverageQuantity"]( + ... "Density", weight="CellMassMsun") + >>> verts = dd.extract_isocontours("Density", rho, + ... 
"triangles.obj", True) + """ + from yt.data_objects.static_output import ParticleDataset + from yt.frontends.stream.data_structures import StreamParticlesDataset + + verts = [] + samples = [] + if isinstance(self.ds, (ParticleDataset, StreamParticlesDataset)): + raise NotImplementedError + for block, mask in self.blocks: + my_verts = self._extract_isocontours_from_grid( + block, mask, field, value, sample_values + ) + if sample_values is not None: + my_verts, svals = my_verts + samples.append(svals) + verts.append(my_verts) + verts = np.concatenate(verts).transpose() + verts = self.comm.par_combine_object(verts, op="cat", datatype="array") + verts = verts.transpose() + if sample_values is not None: + samples = np.concatenate(samples) + samples = self.comm.par_combine_object(samples, op="cat", datatype="array") + if rescale: + mi = np.min(verts, axis=0) + ma = np.max(verts, axis=0) + verts = (verts - mi) / (ma - mi).max() + if filename is not None and self.comm.rank == 0: + if hasattr(filename, "write"): + f = filename + else: + f = open(filename, "w") + for v1 in verts: + f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2])) + for i in range(len(verts) // 3): + f.write("f %s %s %s\n" % (i * 3 + 1, i * 3 + 2, i * 3 + 3)) + if not hasattr(filename, "write"): + f.close() + if sample_values is not None: + return verts, samples + return verts + + def _extract_isocontours_from_grid( + self, grid, mask, field, value, sample_values=None + ): + vc_fields = [field] + if sample_values is not None: + vc_fields.append(sample_values) + + vc_data = grid.get_vertex_centered_data(vc_fields, no_ghost=False) + try: + svals = vc_data[sample_values] + except KeyError: + svals = None + + my_verts = march_cubes_grid( + value, vc_data[field], mask, grid.LeftEdge, grid.dds, svals + ) + return my_verts + + def calculate_isocontour_flux( + self, field, value, field_x, field_y, field_z, fluxing_field=None + ): + r"""This identifies isocontours on a cell-by-cell basis, with no + consideration of global connectedness, and calculates the flux over + those contours. + + This function will conduct `marching cubes + `_ on all the cells in a + given data container (grid-by-grid), and then for each identified + triangular segment of an isocontour in a given cell, calculate the + gradient (i.e., normal) in the isocontoured field, interpolate the local + value of the "fluxing" field, the area of the triangle, and then return: + + area * local_flux_value * (n dot v) + + Where area, local_value, and the vector v are interpolated at the barycenter + (weighted by the vertex values) of the triangle. Note that this + specifically allows for the field fluxing across the surface to be + *different* from the field being contoured. If the fluxing_field is + not specified, it is assumed to be 1.0 everywhere, and the raw flux + with no local-weighting is returned. + + Additionally, the returned flux is defined as flux *into* the surface, + not flux *out of* the surface. + + Parameters + ---------- + field : string + Any field that can be obtained in a data object. This is the field + which will be isocontoured and used as the "local_value" in the + flux equation. + value : float + The value at which the isocontour should be calculated. + field_x : string + The x-component field + field_y : string + The y-component field + field_z : string + The z-component field + fluxing_field : string, optional + The field whose passage over the surface is of interest. If not + specified, assumed to be 1.0 everywhere. 
+ + Returns + ------- + flux : float + The summed flux. Note that it is not currently scaled; this is + simply the code-unit area times the fields. + + Examples + -------- + This will create a data object, find a nice value in the center, and + calculate the metal flux over it. + + >>> dd = ds.all_data() + >>> rho = dd.quantities["WeightedAverageQuantity"]( + ... "Density", weight="CellMassMsun") + >>> flux = dd.calculate_isocontour_flux("Density", rho, + ... "velocity_x", "velocity_y", "velocity_z", "Metal_Density") + """ + flux = 0.0 + for block, mask in self.blocks: + flux += self._calculate_flux_in_grid( + block, mask, field, value, field_x, field_y, field_z, fluxing_field + ) + flux = self.comm.mpi_allreduce(flux, op="sum") + return flux + + def _calculate_flux_in_grid( + self, grid, mask, field, value, field_x, field_y, field_z, fluxing_field=None + ): + + vc_fields = [field, field_x, field_y, field_z] + if fluxing_field is not None: + vc_fields.append(fluxing_field) + + vc_data = grid.get_vertex_centered_data(vc_fields) + + if fluxing_field is None: + ff = np.ones_like(vc_data[field], dtype="float64") + else: + ff = vc_data[fluxing_field] + + return march_cubes_grid_flux( + value, + vc_data[field], + vc_data[field_x], + vc_data[field_y], + vc_data[field_z], + ff, + mask, + grid.LeftEdge, + grid.dds, + ) + + def extract_connected_sets( + self, field, num_levels, min_val, max_val, log_space=True, cumulative=True + ): + """ + This function will create a set of contour objects, defined + by having connected cell structures, which can then be + studied and used to 'paint' their source grids, thus enabling + them to be plotted. + + Note that this function *can* return a connected set object that has no + member values. + """ + if log_space: + cons = np.logspace(np.log10(min_val), np.log10(max_val), num_levels + 1) + else: + cons = np.linspace(min_val, max_val, num_levels + 1) + contours = {} + for level in range(num_levels): + contours[level] = {} + if cumulative: + mv = max_val + else: + mv = cons[level + 1] + from yt.data_objects.level_sets.api import identify_contours + from yt.data_objects.level_sets.clump_handling import add_contour_field + + nj, cids = identify_contours(self, field, cons[level], mv) + unique_contours = set([]) + for sl_list in cids.values(): + for _sl, ff in sl_list: + unique_contours.update(np.unique(ff)) + contour_key = uuid.uuid4().hex + # In case we're a cut region already... + base_object = getattr(self, "base_object", self) + add_contour_field(base_object.ds, contour_key) + for cid in sorted(unique_contours): + if cid == -1: + continue + contours[level][cid] = base_object.cut_region( + ["obj['contours_%s'] == %s" % (contour_key, cid)], + {"contour_slices_%s" % contour_key: cids}, + ) + return cons, contours + + def _get_bbox(self): + """ + Return the bounding box for this data container. + This generic version will return the bounds of the entire domain. + """ + return self.ds.domain_left_edge, self.ds.domain_right_edge + + def get_bbox(self): + """ + Return the bounding box for this data container. + """ + if self.ds.geometry != "cartesian": + raise NotImplementedError( + "get_bbox is currently only implemented " "for cartesian geometries!" + ) + le, re = self._get_bbox() + le.convert_to_units("code_length") + re.convert_to_units("code_length") + return le, re + + def volume(self): + """ + Return the volume of the data container. 
+ This is found by adding up the volume of the cells with centers + in the container, rather than using the geometric shape of + the container, so this may vary very slightly + from what might be expected from the geometric volume. + """ + return self.quantities.total_quantity(("index", "cell_volume")) diff --git a/yt/data_objects/selection_objects/boolean_operations.py b/yt/data_objects/selection_objects/boolean_operations.py new file mode 100644 index 00000000000..65fd120ffbc --- /dev/null +++ b/yt/data_objects/selection_objects/boolean_operations.py @@ -0,0 +1,149 @@ +import numpy as np + +import yt.geometry +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer3D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import ensure_list, validate_iterable, validate_object + + +class YTBooleanContainer(YTSelectionContainer3D): + """ + This is a boolean operation, accepting AND, OR, XOR, and NOT for combining + multiple data objects. + + This object is not designed to be created directly; it is designed to be + created implicitly by using one of the bitwise operations (&, \|, ^, \~) on + one or two other data objects. These correspond to the appropriate boolean + operations, and the resultant object can be nested. + + Parameters + ---------- + op : string + Can be AND, OR, XOR, NOT or NEG. + dobj1 : yt.data_objects.selection_objects.base_objects.YTSelectionContainer + The first selection object + dobj2 : yt.data_objects.selection_objects.base_objects.YTSelectionContainer + The second object + + Examples + -------- + + >>> import yt + >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") + >>> sp = ds.sphere("c", 0.1) + >>> dd = ds.r[:,:,:] + >>> new_obj = sp ^ dd + >>> print(new_obj.sum("cell_volume"), dd.sum("cell_volume") - + ... sp.sum("cell_volume")) + """ + + _type_name = "bool" + _con_args = ("op", "dobj1", "dobj2") + + def __init__( + self, op, dobj1, dobj2, ds=None, field_parameters=None, data_source=None + ): + YTSelectionContainer3D.__init__(self, None, ds, field_parameters, data_source) + self.op = op.upper() + self.dobj1 = dobj1 + self.dobj2 = dobj2 + name = "Boolean%sSelector" % (self.op,) + sel_cls = getattr(yt.geometry.selection_routines, name) + self._selector = sel_cls(self) + + def _get_bbox(self): + le1, re1 = self.dobj1._get_bbox() + if self.op == "NOT": + return le1, re1 + else: + le2, re2 = self.dobj2._get_bbox() + return np.minimum(le1, le2), np.maximum(re1, re2) + + +class YTIntersectionContainer3D(YTSelectionContainer3D): + """ + This is a more efficient method of selecting the intersection of multiple + data selection objects. + + Creating one of these objects returns the intersection of all of the + sub-objects; it is designed to be a faster method than chaining & ("and") + operations to create a single, large intersection. 
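+
+ For instance (an illustrative comparison), ``ds.intersection((sp1, sp2, sp3))``
+ selects the same region as the chained form below, but avoids building a
+ nested boolean container for every ``&``:
+
+ >>> slower_obj = sp1 & sp2 & sp3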
+ + Parameters + ---------- + data_objects : Iterable of YTSelectionContainer + The data objects to intersect + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.15) + >>> sp2 = ds.sphere((0.38, 0.51, 0.55), 0.1) + >>> sp3 = ds.sphere((0.35, 0.5, 0.6), 0.15) + >>> new_obj = ds.intersection((sp1, sp2, sp3)) + >>> print(new_obj.sum("cell_volume")) + """ + + _type_name = "intersection" + _con_args = ("data_objects",) + + def __init__(self, data_objects, ds=None, field_parameters=None, data_source=None): + validate_iterable(data_objects) + for obj in data_objects: + validate_object(obj, YTSelectionContainer) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + YTSelectionContainer3D.__init__(self, None, ds, field_parameters, data_source) + # ensure_list doesn't check for tuples + if isinstance(data_objects, tuple): + data_objects = list(data_objects) + self.data_objects = ensure_list(data_objects) + + +class YTDataObjectUnion(YTSelectionContainer3D): + """ + This is a more efficient method of selecting the union of multiple + data selection objects. + + Creating one of these objects returns the union of all of the sub-objects; + it is designed to be a faster method than chaining | (or) operations to + create a single, large union. + + Parameters + ---------- + data_objects : Iterable of YTSelectionContainer + The data objects to union + + Examples + -------- + + >>> import yt + >>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") + >>> sp1 = ds.sphere((0.4, 0.5, 0.6), 0.1) + >>> sp2 = ds.sphere((0.3, 0.5, 0.15), 0.1) + >>> sp3 = ds.sphere((0.5, 0.5, 0.9), 0.1) + >>> new_obj = ds.union((sp1, sp2, sp3)) + >>> print(new_obj.sum("cell_volume")) + """ + + _type_name = "union" + _con_args = ("data_objects",) + + def __init__(self, data_objects, ds=None, field_parameters=None, data_source=None): + validate_iterable(data_objects) + for obj in data_objects: + validate_object(obj, YTSelectionContainer) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + YTSelectionContainer3D.__init__(self, None, ds, field_parameters, data_source) + # ensure_list doesn't check for tuples + if isinstance(data_objects, tuple): + data_objects = list(data_objects) + self.data_objects = ensure_list(data_objects) diff --git a/yt/data_objects/selection_objects/cut_region.py b/yt/data_objects/selection_objects/cut_region.py new file mode 100644 index 00000000000..8399e95e844 --- /dev/null +++ b/yt/data_objects/selection_objects/cut_region.py @@ -0,0 +1,247 @@ +import numpy as np + +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer3D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import ensure_list, validate_iterable, validate_object +from yt.geometry.selection_routines import points_in_cells +from yt.utilities.exceptions import YTIllDefinedCutRegion +from yt.utilities.on_demand_imports import _scipy + + +class YTCutRegion(YTSelectionContainer3D): + """ + This is a data object designed to allow individuals to apply logical + operations to fields and filter as a result of those cuts. + + Parameters + ---------- + data_source : YTSelectionContainer3D + The object to which cuts will be applied. + conditionals : list of strings + A list of conditionals that will be evaluated. 
In the namespace + available, these conditionals will have access to 'obj' which is a data + object of unknown shape, and they must generate a boolean array. For + instance, conditionals = ["obj['temperature'] < 1e3"] + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> sp = ds.sphere("max", (1.0, 'Mpc')) + >>> cr = ds.cut_region(sp, ["obj['temperature'] < 1e3"]) + """ + + _type_name = "cut_region" + _con_args = ("base_object", "conditionals") + + def __init__( + self, + data_source, + conditionals, + ds=None, + field_parameters=None, + base_object=None, + locals=None, + ): + if locals is None: + locals = {} + validate_object(data_source, YTSelectionContainer) + validate_iterable(conditionals) + for condition in conditionals: + validate_object(condition, str) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(base_object, YTSelectionContainer) + if base_object is not None: + # passing base_object explicitly has been deprecated, + # but we handle it here for backward compatibility + if data_source is not None: + raise RuntimeError("Cannot use both base_object and data_source") + data_source = base_object + + self.conditionals = ensure_list(conditionals) + if isinstance(data_source, YTCutRegion): + # If the source is also a cut region, add its conditionals + # and set the source to be its source. + # Preserve order of conditionals. + self.conditionals = data_source.conditionals + self.conditionals + data_source = data_source.base_object + + super(YTCutRegion, self).__init__( + data_source.center, ds, field_parameters, data_source=data_source + ) + self.base_object = data_source + self.locals = locals + self._selector = None + # Need to interpose for __getitem__, fwidth, fcoords, icoords, iwidth, + # ires and get_data + + def chunks(self, fields, chunking_style, **kwargs): + # We actually want to chunk the sub-chunk, not ourselves. We have no + # chunks to speak of, as we do not data IO. + for chunk in self.index._chunk(self.base_object, chunking_style, **kwargs): + with self.base_object._chunked_read(chunk): + with self._chunked_read(chunk): + self.get_data(fields) + yield self + + def get_data(self, fields=None): + fields = ensure_list(fields) + self.base_object.get_data(fields) + ind = self._cond_ind + for field in fields: + f = self.base_object[field] + if f.shape != ind.shape: + parent = getattr(self, "parent", self.base_object) + self.field_data[field] = parent[field][self._part_ind(field[0])] + else: + self.field_data[field] = self.base_object[field][ind] + + @property + def blocks(self): + # We have to take a slightly different approach here. Note that all + # that .blocks has to yield is a 3D array and a mask. + for obj, m in self.base_object.blocks: + m = m.copy() + with obj._field_parameter_state(self.field_parameters): + for cond in self.conditionals: + ss = eval(cond) + m = np.logical_and(m, ss, m) + if not np.any(m): + continue + yield obj, m + + @property + def _cond_ind(self): + ind = None + obj = self.base_object + locals = self.locals.copy() + if "obj" in locals: + raise RuntimeError( + '"obj" has been defined in the "locals" ; this is not supported, please rename the variable.' 
+ ) + locals["obj"] = obj + with obj._field_parameter_state(self.field_parameters): + for cond in self.conditionals: + res = eval(cond, locals) + if ind is None: + ind = res + if ind.shape != res.shape: + raise YTIllDefinedCutRegion(self.conditionals) + np.logical_and(res, ind, ind) + return ind + + def _part_ind_KDTree(self, ptype): + """Find the particles in cells using a KDTree approach.""" + parent = getattr(self, "parent", self.base_object) + units = "code_length" + + pos = np.stack( + [ + self[("index", "x")].to(units), + self[("index", "y")].to(units), + self[("index", "z")].to(units), + ], + axis=1, + ).value + dx = np.stack( + [ + self[("index", "dx")].to(units), + self[("index", "dy")].to(units), + self[("index", "dz")].to(units), + ], + axis=1, + ).value + ppos = np.stack( + [ + parent[(ptype, "particle_position_x")], + parent[(ptype, "particle_position_y")], + parent[(ptype, "particle_position_z")], + ], + axis=1, + ).value + levels = self[("index", "grid_level")].astype("int32").value + levelmin = levels.min() + levelmax = levels.max() + + mask = np.zeros(ppos.shape[0], dtype=bool) + + for lvl in range(levelmax, levelmin - 1, -1): + # Filter out cells not in the current level + lvl_mask = levels == lvl + dx_loc = dx[lvl_mask] + pos_loc = pos[lvl_mask] + + grid_tree = _scipy.spatial.cKDTree(pos_loc, boxsize=1) + + # Compute closest cell for all remaining particles + dist, icell = grid_tree.query( + ppos[~mask], distance_upper_bound=dx_loc.max(), p=np.inf + ) + mask_loc = np.isfinite(dist[:]) + + # Check that particles within dx of a cell are in it + i = icell[mask_loc] + dist = np.abs(ppos[~mask][mask_loc, :] - pos_loc[i]) + tmp_mask = np.all(dist <= (dx_loc[i] / 2), axis=1) + + mask_loc[mask_loc] = tmp_mask + + # Update the particle mask with particles found at this level + mask[~mask] |= mask_loc + + return mask + + def _part_ind_brute_force(self, ptype): + parent = getattr(self, "parent", self.base_object) + units = "code_length" + mask = points_in_cells( + self[("index", "x")].to(units), + self[("index", "y")].to(units), + self[("index", "z")].to(units), + self[("index", "dx")].to(units), + self[("index", "dy")].to(units), + self[("index", "dz")].to(units), + parent[(ptype, "particle_position_x")].to(units), + parent[(ptype, "particle_position_y")].to(units), + parent[(ptype, "particle_position_z")].to(units), + ) + + return mask + + def _part_ind(self, ptype): + # If scipy is installed, use the fast KD tree + # implementation. Else, fall back onto the direct + # brute-force algorithm. + try: + _scipy.spatial.KDTree + return self._part_ind_KDTree(ptype) + except ImportError: + return self._part_ind_brute_force(ptype) + + @property + def icoords(self): + return self.base_object.icoords[self._cond_ind, :] + + @property + def fcoords(self): + return self.base_object.fcoords[self._cond_ind, :] + + @property + def ires(self): + return self.base_object.ires[self._cond_ind] + + @property + def fwidth(self): + return self.base_object.fwidth[self._cond_ind, :] + + def _get_bbox(self): + """ + Get the bounding box for the cut region. Here we just use + the bounding box for the source region. 
+ """ + return self.base_object._get_bbox() diff --git a/yt/data_objects/selection_objects/disk.py b/yt/data_objects/selection_objects/disk.py new file mode 100644 index 00000000000..b65648bcdcd --- /dev/null +++ b/yt/data_objects/selection_objects/disk.py @@ -0,0 +1,95 @@ +import numpy as np + +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer3D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import ( + fix_length, + validate_3d_array, + validate_center, + validate_float, + validate_iterable, + validate_object, +) + + +class YTDisk(YTSelectionContainer3D): + """ + By providing a *center*, a *normal*, a *radius* and a *height* we + can define a cylinder of any proportion. Only cells whose centers are + within the cylinder will be selected. + + Parameters + ---------- + center : array_like + coordinate to which the normal, radius, and height all reference + normal : array_like + the normal vector defining the direction of lengthwise part of the + cylinder + radius : float + the radius of the cylinder + height : float + the distance from the midplane of the cylinder to the top and + bottom planes + fields : array of fields, optional + any fields to be pre-loaded in the cylinder object + ds: ~yt.data_objects.static_output.Dataset, optional + An optional dataset to use rather than self.ds + field_parameters : dictionary + A dictionary of field parameters than can be accessed by derived + fields. + data_source: optional + Draw the selection from the provided data source rather than + all data associated with the data_set + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> c = [0.5,0.5,0.5] + >>> disk = ds.disk(c, [1,0,0], (1, 'kpc'), (10, 'kpc')) + """ + + _type_name = "disk" + _con_args = ("center", "_norm_vec", "radius", "height") + + def __init__( + self, + center, + normal, + radius, + height, + fields=None, + ds=None, + field_parameters=None, + data_source=None, + ): + validate_center(center) + validate_3d_array(normal) + validate_float(radius) + validate_float(height) + validate_iterable(fields) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) + self._norm_vec = np.array(normal) / np.sqrt(np.dot(normal, normal)) + self.set_field_parameter("normal", self._norm_vec) + self.set_field_parameter("center", self.center) + self.height = fix_length(height, self.ds) + self.radius = fix_length(radius, self.ds) + self._d = -1.0 * np.dot(self._norm_vec, self.center) + + def _get_bbox(self): + """ + Return the minimum bounding box for the disk. 
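+
+ Schematically (a sketch of the construction used in the implementation
+ below, with symbols introduced only for this note): with cap centers
+ ``pa = center + normal*height`` and ``pb = center - normal*height``,
+ ``a = pa - pb``, and ``r`` the disk radius, the per-axis half-extent of
+ the caps is
+
+ .. math::
+
+    e_i = r \sqrt{1 - a_i^2 / (\vec{a} \cdot \vec{a})}
+
+ and the box is ``[min(pa, pb) - e, max(pa, pb) + e]``.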
+ """ + # http://www.iquilezles.org/www/articles/diskbbox/diskbbox.htm + pa = self.center + self._norm_vec * self.height + pb = self.center - self._norm_vec * self.height + a = pa - pb + db = self.radius * np.sqrt(1.0 - a.d * a.d / np.dot(a, a)) + return np.minimum(pa - db, pb - db), np.maximum(pa + db, pb + db) diff --git a/yt/data_objects/selection_objects/object_collection.py b/yt/data_objects/selection_objects/object_collection.py new file mode 100644 index 00000000000..1e3b98b1dbc --- /dev/null +++ b/yt/data_objects/selection_objects/object_collection.py @@ -0,0 +1,31 @@ +import numpy as np + +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer3D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import validate_center, validate_iterable, validate_object + + +class YTDataCollection(YTSelectionContainer3D): + """ + By selecting an arbitrary *object_list*, we can act on those grids. + Child cells are not returned. + """ + + _type_name = "data_collection" + _con_args = ("_obj_list",) + + def __init__( + self, obj_list, ds=None, field_parameters=None, data_source=None, center=None + ): + validate_iterable(obj_list) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + if center is not None: + validate_center(center) + YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) + self._obj_ids = np.array([o.id - o._id_offset for o in obj_list], dtype="int64") + self._obj_list = obj_list diff --git a/yt/data_objects/selection_objects/point.py b/yt/data_objects/selection_objects/point.py new file mode 100644 index 00000000000..01c8830c846 --- /dev/null +++ b/yt/data_objects/selection_objects/point.py @@ -0,0 +1,52 @@ +from yt import YTArray +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer0D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import validate_3d_array, validate_object + + +class YTPoint(YTSelectionContainer0D): + """ + A 0-dimensional object defined by a single point + + Parameters + ---------- + p: array_like + A points defined within the domain. If the domain is + periodic its position will be corrected to lie inside + the range [DLE,DRE) to ensure one and only one cell may + match that point + ds: ~yt.data_objects.static_output.Dataset, optional + An optional dataset to use rather than self.ds + field_parameters : dictionary + A dictionary of field parameters than can be accessed by derived + fields. 
+ data_source: optional + Draw the selection from the provided data source rather than + all data associated with the data_set + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> c = [0.5,0.5,0.5] + >>> point = ds.point(c) + """ + + _type_name = "point" + _con_args = ("p",) + + def __init__(self, p, ds=None, field_parameters=None, data_source=None): + validate_3d_array(p) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + super(YTPoint, self).__init__(ds, field_parameters, data_source) + if isinstance(p, YTArray): + # we pass p through ds.arr to ensure code units are attached + self.p = self.ds.arr(p) + else: + self.p = self.ds.arr(p, "code_length") diff --git a/yt/data_objects/selection_objects/ray.py b/yt/data_objects/selection_objects/ray.py new file mode 100644 index 00000000000..0bdf7430e3a --- /dev/null +++ b/yt/data_objects/selection_objects/ray.py @@ -0,0 +1,227 @@ +import numpy as np +from unyt import udot, unorm + +from yt import YTArray, YTQuantity +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer1D, +) +from yt.data_objects.static_output import Dataset +from yt.frontends.sph.data_structures import SPHDataset +from yt.funcs import ( + fix_axis, + validate_3d_array, + validate_axis, + validate_float, + validate_iterable, + validate_object, +) +from yt.utilities.lib.pixelization_routines import SPHKernelInterpolationTable +from yt.utilities.logger import ytLogger as mylog + + +class YTOrthoRay(YTSelectionContainer1D): + """ + This is an orthogonal ray cast through the entire domain, at a specific + coordinate. + + This object is typically accessed through the `ortho_ray` object that + hangs off of index objects. The resulting arrays have their + dimensionality reduced to one, and an ordered list of points at an + (x,y) tuple along `axis` are available. + + Parameters + ---------- + axis : int or char + The axis along which to slice. Can be 0, 1, or 2 for x, y, z. + coords : tuple of floats + The (plane_x, plane_y) coordinates at which to cast the ray. Note + that this is in the plane coordinates: so if you are casting along + x, this will be (y, z). If you are casting along y, this will be + (z, x). If you are casting along z, this will be (x, y). + ds: ~yt.data_objects.static_output.Dataset, optional + An optional dataset to use rather than self.ds + field_parameters : dictionary + A dictionary of field parameters than can be accessed by derived + fields. + data_source: optional + Draw the selection from the provided data source rather than + all data associated with the data_set + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> oray = ds.ortho_ray(0, (0.2, 0.74)) + >>> print(oray["Density"]) + + Note: The low-level data representation for rays are not guaranteed to be + spatially ordered. In particular, with AMR datasets, higher resolution + data is tagged on to the end of the ray. If you want this data + represented in a spatially ordered manner, manually sort it by the "t" + field, which is the value of the parametric variable that goes from 0 at + the start of the ray to 1 at the end: + + >>> my_ray = ds.ortho_ray(...) 
+ >>> ray_sort = np.argsort(my_ray["t"]) + >>> density = my_ray["density"][ray_sort] + """ + + _key_fields = ["x", "y", "z", "dx", "dy", "dz"] + _type_name = "ortho_ray" + _con_args = ("axis", "coords") + + def __init__(self, axis, coords, ds=None, field_parameters=None, data_source=None): + validate_axis(ds, axis) + validate_iterable(coords) + for c in coords: + validate_float(c) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + super(YTOrthoRay, self).__init__(ds, field_parameters, data_source) + self.axis = fix_axis(axis, self.ds) + xax = self.ds.coordinates.x_axis[self.axis] + yax = self.ds.coordinates.y_axis[self.axis] + self.px_ax = xax + self.py_ax = yax + # Even though we may not be using x,y,z we use them here. + self.px_dx = "d%s" % ("xyz"[self.px_ax]) + self.py_dx = "d%s" % ("xyz"[self.py_ax]) + # Convert coordinates to code length. + if isinstance(coords[0], YTQuantity): + self.px = self.ds.quan(coords[0]).to("code_length") + else: + self.px = self.ds.quan(coords[0], "code_length") + if isinstance(coords[1], YTQuantity): + self.py = self.ds.quan(coords[1]).to("code_length") + else: + self.py = self.ds.quan(coords[1], "code_length") + self.sort_by = "xyz"[self.axis] + + @property + def coords(self): + return (self.px, self.py) + + +class YTRay(YTSelectionContainer1D): + """ + This is an arbitrarily-aligned ray cast through the entire domain, at a + specific coordinate. + + This object is typically accessed through the `ray` object that hangs + off of index objects. The resulting arrays have their + dimensionality reduced to one, and an ordered list of points at an + (x,y) tuple along `axis` are available, as is the `t` field, which + corresponds to a unitless measurement along the ray from start to + end. + + Parameters + ---------- + start_point : array-like set of 3 floats + The place where the ray starts. + end_point : array-like set of 3 floats + The place where the ray ends. + ds: ~yt.data_objects.static_output.Dataset, optional + An optional dataset to use rather than self.ds + field_parameters : dictionary + A dictionary of field parameters than can be accessed by derived + fields. + data_source: optional + Draw the selection from the provided data source rather than + all data associated with the data_set + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> ray = ds.ray((0.2, 0.74, 0.11), (0.4, 0.91, 0.31)) + >>> print(ray["Density"], ray["t"], ray["dts"]) + + Note: The low-level data representation for rays are not guaranteed to be + spatially ordered. In particular, with AMR datasets, higher resolution + data is tagged on to the end of the ray. If you want this data + represented in a spatially ordered manner, manually sort it by the "t" + field, which is the value of the parametric variable that goes from 0 at + the start of the ray to 1 at the end: + + >>> my_ray = ds.ray(...) 
+ >>> ray_sort = np.argsort(my_ray["t"]) + >>> density = my_ray["density"][ray_sort] + """ + + _type_name = "ray" + _con_args = ("start_point", "end_point") + _container_fields = ("t", "dts") + + def __init__( + self, start_point, end_point, ds=None, field_parameters=None, data_source=None + ): + validate_3d_array(start_point) + validate_3d_array(end_point) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + super(YTRay, self).__init__(ds, field_parameters, data_source) + if isinstance(start_point, YTArray): + self.start_point = self.ds.arr(start_point).to("code_length") + else: + self.start_point = self.ds.arr(start_point, "code_length", dtype="float64") + if isinstance(end_point, YTArray): + self.end_point = self.ds.arr(end_point).to("code_length") + else: + self.end_point = self.ds.arr(end_point, "code_length", dtype="float64") + if (self.start_point < self.ds.domain_left_edge).any() or ( + self.end_point > self.ds.domain_right_edge + ).any(): + mylog.warn( + "Ray start or end is outside the domain. " + + "Returned data will only be for the ray section inside the domain." + ) + self.vec = self.end_point - self.start_point + self._set_center(self.start_point) + self.set_field_parameter("center", self.start_point) + self._dts, self._ts = None, None + + def _generate_container_field(self, field): + # What should we do with `ParticleDataset`? + if isinstance(self.ds, SPHDataset): + return self._generate_container_field_sph(field) + else: + return self._generate_container_field_grid(field) + + def _generate_container_field_grid(self, field): + if self._current_chunk is None: + self.index._identify_base_chunk(self) + if field == "dts": + return self._current_chunk.dtcoords + elif field == "t": + return self._current_chunk.tcoords + else: + raise KeyError(field) + + def _generate_container_field_sph(self, field): + if field not in ["dts", "t"]: + raise KeyError(field) + + length = unorm(self.vec) + pos = self[self.ds._sph_ptypes[0], "particle_position"] + r = pos - self.start_point + l = udot(r, self.vec / length) + + if field == "t": + return l / length + + hsml = self[self.ds._sph_ptypes[0], "smoothing_length"] + mass = self[self.ds._sph_ptypes[0], "particle_mass"] + dens = self[self.ds._sph_ptypes[0], "density"] + # impact parameter from particle to ray + b = np.sqrt(np.sum(r ** 2, axis=1) - l ** 2) + + # Use an interpolation table to evaluate the integrated 2D + # kernel from the dimensionless impact parameter b/hsml. + itab = SPHKernelInterpolationTable(self.ds.kernel_name) + dl = itab.interpolate_array(b / hsml) * mass / dens / hsml ** 2 + return dl / length diff --git a/yt/data_objects/selection_objects/region.py b/yt/data_objects/selection_objects/region.py new file mode 100644 index 00000000000..004a7e80934 --- /dev/null +++ b/yt/data_objects/selection_objects/region.py @@ -0,0 +1,71 @@ +from yt import YTArray +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer3D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import ( + validate_3d_array, + validate_center, + validate_iterable, + validate_object, +) + + +class YTRegion(YTSelectionContainer3D): + """A 3D region of data with an arbitrary center. + + Takes an array of three *left_edge* coordinates, three + *right_edge* coordinates, and a *center* that can be anywhere + in the domain. 
If the selected region extends past the edges + of the domain, no data will be found there, though the + object's `left_edge` or `right_edge` are not modified. + + Parameters + ---------- + center : array_like + The center of the region + left_edge : array_like + The left edge of the region + right_edge : array_like + The right edge of the region + """ + + _type_name = "region" + _con_args = ("center", "left_edge", "right_edge") + + def __init__( + self, + center, + left_edge, + right_edge, + fields=None, + ds=None, + field_parameters=None, + data_source=None, + ): + if center is not None: + validate_center(center) + validate_3d_array(left_edge) + validate_3d_array(right_edge) + validate_iterable(fields) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) + if not isinstance(left_edge, YTArray): + self.left_edge = self.ds.arr(left_edge, "code_length", dtype="float64") + else: + # need to assign this dataset's unit registry to the YTArray + self.left_edge = self.ds.arr(left_edge.copy(), dtype="float64") + if not isinstance(right_edge, YTArray): + self.right_edge = self.ds.arr(right_edge, "code_length", dtype="float64") + else: + # need to assign this dataset's unit registry to the YTArray + self.right_edge = self.ds.arr(right_edge.copy(), dtype="float64") + + def _get_bbox(self): + """ + Return the minimum bounding box for the region. + """ + return self.left_edge, self.right_edge diff --git a/yt/data_objects/selection_objects/slices.py b/yt/data_objects/selection_objects/slices.py new file mode 100644 index 00000000000..3ed68142f97 --- /dev/null +++ b/yt/data_objects/selection_objects/slices.py @@ -0,0 +1,366 @@ +import numpy as np + +from yt import iterable +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer2D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import ( + ensure_list, + validate_3d_array, + validate_axis, + validate_center, + validate_float, + validate_object, + validate_width_tuple, +) +from yt.utilities.minimal_representation import MinimalSliceData +from yt.utilities.orientation import Orientation + + +class YTSlice(YTSelectionContainer2D): + """ + This is a data object corresponding to a slice through the simulation + domain. + + This object is typically accessed through the `slice` object that hangs + off of index objects. Slice is an orthogonal slice through the + data, taking all the points at the finest resolution available and then + indexing them. It is more appropriately thought of as a slice + 'operator' than an object, however, as its field and coordinate can + both change. + + Parameters + ---------- + axis : int or char + The axis along which to slice. Can be 0, 1, or 2 for x, y, z. + coord : float + The coordinate along the axis at which to slice. This is in + "domain" coordinates. + center : array_like, optional + The 'center' supplied to fields that use it. Note that this does + not have to have `coord` as one value. optional. + ds: ~yt.data_objects.static_output.Dataset, optional + An optional dataset to use rather than self.ds + field_parameters : dictionary + A dictionary of field parameters than can be accessed by derived + fields. 
+ data_source: optional + Draw the selection from the provided data source rather than + all data associated with the data_set + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> slice = ds.slice(0, 0.25) + >>> print(slice["Density"]) + """ + + _top_node = "/Slices" + _type_name = "slice" + _con_args = ("axis", "coord") + _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz") + + def __init__( + self, axis, coord, center=None, ds=None, field_parameters=None, data_source=None + ): + validate_axis(ds, axis) + validate_float(coord) + # center is an optional parameter + if center is not None: + validate_center(center) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + YTSelectionContainer2D.__init__(self, axis, ds, field_parameters, data_source) + self._set_center(center) + self.coord = coord + + def _generate_container_field(self, field): + xax = self.ds.coordinates.x_axis[self.axis] + yax = self.ds.coordinates.y_axis[self.axis] + if self._current_chunk is None: + self.index._identify_base_chunk(self) + if field == "px": + return self._current_chunk.fcoords[:, xax] + elif field == "py": + return self._current_chunk.fcoords[:, yax] + elif field == "pz": + return self._current_chunk.fcoords[:, self.axis] + elif field == "pdx": + return self._current_chunk.fwidth[:, xax] * 0.5 + elif field == "pdy": + return self._current_chunk.fwidth[:, yax] * 0.5 + elif field == "pdz": + return self._current_chunk.fwidth[:, self.axis] * 0.5 + else: + raise KeyError(field) + + @property + def _mrep(self): + return MinimalSliceData(self) + + def to_pw(self, fields=None, center="c", width=None, origin="center-window"): + r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this + object. + + This is a bare-bones mechanism of creating a plot window from this + object, which can then be moved around, zoomed, and on and on. All + behavior of the plot window is relegated to that routine. + """ + pw = self._get_pw(fields, center, width, origin, "Slice") + return pw + + def plot(self, fields=None): + if hasattr(self._data_source, "left_edge") and hasattr( + self._data_source, "right_edge" + ): + left_edge = self._data_source.left_edge + right_edge = self._data_source.right_edge + center = (left_edge + right_edge) / 2.0 + width = right_edge - left_edge + xax = self.ds.coordinates.x_axis[self.axis] + yax = self.ds.coordinates.y_axis[self.axis] + lx, rx = left_edge[xax], right_edge[xax] + ly, ry = left_edge[yax], right_edge[yax] + width = (rx - lx), (ry - ly) + else: + width = self.ds.domain_width + center = self.ds.domain_center + pw = self._get_pw(fields, center, width, "native", "Slice") + pw.show() + return pw + + +class YTCuttingPlane(YTSelectionContainer2D): + """ + This is a data object corresponding to an oblique slice through the + simulation domain. + + This object is typically accessed through the `cutting` object + that hangs off of index objects. A cutting plane is an oblique + plane through the data, defined by a normal vector and a coordinate. + It attempts to guess an 'north' vector, which can be overridden, and + then it pixelizes the appropriate data onto the plane without + interpolation. + + Parameters + ---------- + normal : array_like + The vector that defines the desired plane. For instance, the + angular momentum of a sphere. + center : array_like + The center of the cutting plane, where the normal vector is anchored. 
+ north_vector: array_like, optional + An optional vector to describe the north-facing direction in the resulting + plane. + ds: ~yt.data_objects.static_output.Dataset, optional + An optional dataset to use rather than self.ds + field_parameters : dictionary + A dictionary of field parameters than can be accessed by derived + fields. + data_source: optional + Draw the selection from the provided data source rather than + all data associated with the dataset + + Notes + ----- + + This data object in particular can be somewhat expensive to create. + It's also important to note that unlike the other 2D data objects, this + object provides px, py, pz, as some cells may have a height from the + plane. + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> cp = ds.cutting([0.1, 0.2, -0.9], [0.5, 0.42, 0.6]) + >>> print(cp["Density"]) + """ + + _plane = None + _top_node = "/CuttingPlanes" + _key_fields = YTSelectionContainer2D._key_fields + ["pz", "pdz"] + _type_name = "cutting" + _con_args = ("normal", "center") + _tds_attrs = ("_inv_mat",) + _tds_fields = ("x", "y", "z", "dx") + _container_fields = ("px", "py", "pz", "pdx", "pdy", "pdz") + + def __init__( + self, + normal, + center, + north_vector=None, + ds=None, + field_parameters=None, + data_source=None, + ): + validate_3d_array(normal) + validate_center(center) + if north_vector is not None: + validate_3d_array(north_vector) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + YTSelectionContainer2D.__init__(self, 4, ds, field_parameters, data_source) + self._set_center(center) + self.set_field_parameter("center", center) + # Let's set up our plane equation + # ax + by + cz + d = 0 + self.orienter = Orientation(normal, north_vector=north_vector) + self._norm_vec = self.orienter.normal_vector + self._d = -1.0 * np.dot(self._norm_vec, self.center) + self._x_vec = self.orienter.unit_vectors[0] + self._y_vec = self.orienter.unit_vectors[1] + # First we try all three, see which has the best result: + self._rot_mat = np.array([self._x_vec, self._y_vec, self._norm_vec]) + self._inv_mat = np.linalg.pinv(self._rot_mat) + self.set_field_parameter("cp_x_vec", self._x_vec) + self.set_field_parameter("cp_y_vec", self._y_vec) + self.set_field_parameter("cp_z_vec", self._norm_vec) + + @property + def normal(self): + return self._norm_vec + + def _generate_container_field(self, field): + if self._current_chunk is None: + self.index._identify_base_chunk(self) + if field == "px": + x = self._current_chunk.fcoords[:, 0] - self.center[0] + y = self._current_chunk.fcoords[:, 1] - self.center[1] + z = self._current_chunk.fcoords[:, 2] - self.center[2] + tr = np.zeros(x.size, dtype="float64") + tr = self.ds.arr(tr, "code_length") + tr += x * self._x_vec[0] + tr += y * self._x_vec[1] + tr += z * self._x_vec[2] + return tr + elif field == "py": + x = self._current_chunk.fcoords[:, 0] - self.center[0] + y = self._current_chunk.fcoords[:, 1] - self.center[1] + z = self._current_chunk.fcoords[:, 2] - self.center[2] + tr = np.zeros(x.size, dtype="float64") + tr = self.ds.arr(tr, "code_length") + tr += x * self._y_vec[0] + tr += y * self._y_vec[1] + tr += z * self._y_vec[2] + return tr + elif field == "pz": + x = self._current_chunk.fcoords[:, 0] - self.center[0] + y = self._current_chunk.fcoords[:, 1] - self.center[1] + z = self._current_chunk.fcoords[:, 2] - self.center[2] + tr = np.zeros(x.size, dtype="float64") + tr = self.ds.arr(tr, "code_length") + tr += x 
* self._norm_vec[0] + tr += y * self._norm_vec[1] + tr += z * self._norm_vec[2] + return tr + elif field == "pdx": + return self._current_chunk.fwidth[:, 0] * 0.5 + elif field == "pdy": + return self._current_chunk.fwidth[:, 1] * 0.5 + elif field == "pdz": + return self._current_chunk.fwidth[:, 2] * 0.5 + else: + raise KeyError(field) + + def to_pw(self, fields=None, center="c", width=None, axes_unit=None): + r"""Create a :class:`~yt.visualization.plot_window.PWViewerMPL` from this + object. + + This is a bare-bones mechanism of creating a plot window from this + object, which can then be moved around, zoomed, and on and on. All + behavior of the plot window is relegated to that routine. + """ + normal = self.normal + center = self.center + self.fields = ensure_list(fields) + [ + k for k in self.field_data.keys() if k not in self._key_fields + ] + from yt.visualization.fixed_resolution import FixedResolutionBuffer + from yt.visualization.plot_window import ( + PWViewerMPL, + get_oblique_window_parameters, + ) + + (bounds, center_rot) = get_oblique_window_parameters( + normal, center, width, self.ds + ) + pw = PWViewerMPL( + self, + bounds, + fields=self.fields, + origin="center-window", + periodic=False, + oblique=True, + frb_generator=FixedResolutionBuffer, + plot_type="OffAxisSlice", + ) + if axes_unit is not None: + pw.set_axes_unit(axes_unit) + pw._setup_plots() + return pw + + def to_frb(self, width, resolution, height=None, periodic=False): + r"""This function returns a FixedResolutionBuffer generated from this + object. + + An ObliqueFixedResolutionBuffer is an object that accepts a + variable-resolution 2D object and transforms it into an NxM bitmap that + can be plotted, examined or processed. This is a convenience function + to return an FRB directly from an existing 2D data object. Unlike the + corresponding to_frb function for other YTSelectionContainer2D objects, + this does not accept a 'center' parameter as it is assumed to be + centered at the center of the cutting plane. + + Parameters + ---------- + width : width specifier + This can either be a floating point value, in the native domain + units of the simulation, or a tuple of the (value, unit) style. + This will be the width of the FRB. + height : height specifier, optional + This will be the height of the FRB, by default it is equal to width. + resolution : int or tuple of ints + The number of pixels on a side of the final FRB. + periodic : boolean + This can be true or false, and governs whether the pixelization + will span the domain boundaries. + + Returns + ------- + frb : :class:`~yt.visualization.fixed_resolution.ObliqueFixedResolutionBuffer` + A fixed resolution buffer, which can be queried for fields. 
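A geometric reading of the container fields computed above: "px", "py", and "pz" are the components of (cell position - center) in the orthonormal basis (x_vec, y_vec, normal) built in __init__, so "pz" is the signed distance of a cell center from the cutting plane. A small numpy-only sketch of that last projection (illustrative numbers, not yt API):

>>> import numpy as np
>>> normal = np.array([0.1, 0.2, -0.9])
>>> normal = normal / np.linalg.norm(normal)  # unit normal of the plane
>>> center = np.array([0.5, 0.42, 0.6])       # point the plane passes through
>>> point = np.array([0.55, 0.40, 0.62])      # an arbitrary cell center
>>> pz = np.dot(point - center, normal)       # signed distance from the plane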
+ + Examples + -------- + + >>> v, c = ds.find_max("density") + >>> sp = ds.sphere(c, (100.0, 'au')) + >>> L = sp.quantities.angular_momentum_vector() + >>> cutting = ds.cutting(L, c) + >>> frb = cutting.to_frb( (1.0, 'pc'), 1024) + >>> write_image(np.log10(frb["Density"]), 'density_1pc.png') + """ + if iterable(width): + validate_width_tuple(width) + width = self.ds.quan(width[0], width[1]) + if height is None: + height = width + elif iterable(height): + validate_width_tuple(height) + height = self.ds.quan(height[0], height[1]) + if not iterable(resolution): + resolution = (resolution, resolution) + from yt.visualization.fixed_resolution import FixedResolutionBuffer + + bounds = (-width / 2.0, width / 2.0, -height / 2.0, height / 2.0) + frb = FixedResolutionBuffer(self, bounds, resolution, periodic=periodic) + return frb diff --git a/yt/data_objects/selection_objects/spheroids.py b/yt/data_objects/selection_objects/spheroids.py new file mode 100644 index 00000000000..8b1a0ee6086 --- /dev/null +++ b/yt/data_objects/selection_objects/spheroids.py @@ -0,0 +1,231 @@ +import numpy as np + +from yt import YTArray +from yt.data_objects.selection_objects.base_objects import ( + YTSelectionContainer, + YTSelectionContainer3D, +) +from yt.data_objects.static_output import Dataset +from yt.funcs import ( + fix_length, + validate_3d_array, + validate_center, + validate_float, + validate_iterable, + validate_object, +) +from yt.utilities.exceptions import YTEllipsoidOrdering, YTException, YTSphereTooSmall +from yt.utilities.logger import ytLogger as mylog +from yt.utilities.math_utils import get_rotation_matrix +from yt.utilities.on_demand_imports import _miniball + + +class YTSphere(YTSelectionContainer3D): + """ + A sphere of points defined by a *center* and a *radius*. + + Parameters + ---------- + center : array_like + The center of the sphere. + radius : float, width specifier, or YTQuantity + The radius of the sphere. If passed a float, + that will be interpreted in code units. Also + accepts a (radius, unit) tuple or YTQuantity + instance with units attached. + + Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> c = [0.5,0.5,0.5] + >>> sphere = ds.sphere(c, (1., "kpc")) + """ + + _type_name = "sphere" + _con_args = ("center", "radius") + + def __init__( + self, center, radius, ds=None, field_parameters=None, data_source=None + ): + validate_center(center) + validate_float(radius) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + super(YTSphere, self).__init__(center, ds, field_parameters, data_source) + # Unpack the radius, if necessary + radius = fix_length(radius, self.ds) + if radius < self.index.get_smallest_dx(): + raise YTSphereTooSmall( + ds, + radius.in_units("code_length"), + self.index.get_smallest_dx().in_units("code_length"), + ) + self.set_field_parameter("radius", radius) + self.set_field_parameter("center", self.center) + self.radius = radius + + def _get_bbox(self): + """ + Return the minimum bounding box for the sphere. + """ + return -self.radius + self.center, self.radius + self.center + + +class YTMinimalSphere(YTSelectionContainer3D): + """ + Build the smallest sphere that encompasses a set of points. + + Parameters + ---------- + points : YTArray + The points that the sphere will contain. 
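Since the result is a regular sphere selection, it can be queried like any other data object once built. A short sketch, reusing the dataset from the example below and assuming the particle fields and the "total_quantity" derived quantity are available:

>>> import yt
>>> ds = yt.load("output_00080/info_00080.txt")
>>> sp = ds.minimal_sphere(ds.r["particle_position"])
>>> total_mass = sp.quantities.total_quantity(("all", "particle_mass"))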
+ + Examples + -------- + + >>> import yt + >>> ds = yt.load("output_00080/info_00080.txt") + >>> points = ds.r['particle_position'] + >>> sphere = ds.minimal_sphere(points) + """ + + _type_name = "sphere" + _override_selector_name = "minimal_sphere" + _con_args = ("center", "radius") + + def __init__(self, points, ds=None, field_parameters=None, data_source=None): + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + validate_object(points, YTArray) + + points = fix_length(points, ds) + if len(points) < 2: + raise YTException( + "Not enough points. Expected at least 2, got %s" % len(points) + ) + mylog.debug("Building minimal sphere around points.") + mb = _miniball.Miniball(points) + if not mb.is_valid(): + raise YTException("Could not build valid sphere around points.") + + center = ds.arr(mb.center(), points.units) + radius = ds.quan(np.sqrt(mb.squared_radius()), points.units) + super(YTMinimalSphere, self).__init__(center, ds, field_parameters, data_source) + self.set_field_parameter("radius", radius) + self.set_field_parameter("center", self.center) + self.radius = radius + + +class YTEllipsoid(YTSelectionContainer3D): + """ + By providing a *center*,*A*,*B*,*C*,*e0*,*tilt* we + can define a ellipsoid of any proportion. Only cells whose + centers are within the ellipsoid will be selected. + + Parameters + ---------- + center : array_like + The center of the ellipsoid. + A : float + The magnitude of the largest axis (semi-major) of the ellipsoid. + B : float + The magnitude of the medium axis (semi-medium) of the ellipsoid. + C : float + The magnitude of the smallest axis (semi-minor) of the ellipsoid. + e0 : array_like (automatically normalized) + the direction of the largest semi-major axis of the ellipsoid + tilt : float + After the rotation about the z-axis to allign e0 to x in the x-y + plane, and then rotating about the y-axis to align e0 completely + to the x-axis, tilt is the angle in radians remaining to + rotate about the x-axis to align both e1 to the y-axis and e2 to + the z-axis. 
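The *tilt* description above is easier to follow via the two helper angles the constructor computes below: t1 rotates about z so that e0 lies in the x-z plane, t2 then rotates about y so that e0 lies on the x-axis, and *tilt* is the rotation about x that remains. A numpy-only sketch of those two angles, using the same e0 as the example that follows:

>>> import numpy as np
>>> e0 = np.array([0.1, 0.1, 0.1])
>>> e0 = e0 / np.linalg.norm(e0)
>>> t1 = np.arctan(e0[1] / e0[0])  # angle about z that zeroes the y-component
>>> c, s = np.cos(t1), np.sin(t1)
>>> r1 = np.array([[c, s, 0.0], [-s, c, 0.0], [0.0, 0.0, 1.0]]) @ e0  # e0 rotated by -t1 about z
>>> t2 = np.arctan(-r1[2] / r1[0])  # angle about y that puts e0 on the x-axis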
+ Examples + -------- + + >>> import yt + >>> ds = yt.load("RedshiftOutput0005") + >>> c = [0.5,0.5,0.5] + >>> ell = ds.ellipsoid(c, 0.1, 0.1, 0.1, np.array([0.1, 0.1, 0.1]), 0.2) + """ + + _type_name = "ellipsoid" + _con_args = ("center", "_A", "_B", "_C", "_e0", "_tilt") + + def __init__( + self, + center, + A, + B, + C, + e0, + tilt, + fields=None, + ds=None, + field_parameters=None, + data_source=None, + ): + validate_center(center) + validate_float(A) + validate_float(B) + validate_float(C) + validate_3d_array(e0) + validate_float(tilt) + validate_iterable(fields) + validate_object(ds, Dataset) + validate_object(field_parameters, dict) + validate_object(data_source, YTSelectionContainer) + YTSelectionContainer3D.__init__(self, center, ds, field_parameters, data_source) + # make sure the magnitudes of semi-major axes are in order + if A < B or B < C: + raise YTEllipsoidOrdering(ds, A, B, C) + # make sure the smallest side is not smaller than dx + self._A = self.ds.quan(A, "code_length") + self._B = self.ds.quan(B, "code_length") + self._C = self.ds.quan(C, "code_length") + if self._C < self.index.get_smallest_dx(): + raise YTSphereTooSmall(self.ds, self._C, self.index.get_smallest_dx()) + self._e0 = e0 = e0 / (e0 ** 2.0).sum() ** 0.5 + self._tilt = tilt + + # find the t1 angle needed to rotate about z axis to align e0 to x + t1 = np.arctan(e0[1] / e0[0]) + # rotate e0 by -t1 + RZ = get_rotation_matrix(t1, (0, 0, 1)).transpose() + r1 = (e0 * RZ).sum(axis=1) + # find the t2 angle needed to rotate about y axis to align e0 to x + t2 = np.arctan(-r1[2] / r1[0]) + """ + calculate the original e1 + given the tilt about the x axis when e0 was aligned + to x after t1, t2 rotations about z, y + """ + RX = get_rotation_matrix(-tilt, (1, 0, 0)).transpose() + RY = get_rotation_matrix(-t2, (0, 1, 0)).transpose() + RZ = get_rotation_matrix(-t1, (0, 0, 1)).transpose() + e1 = ((0, 1, 0) * RX).sum(axis=1) + e1 = (e1 * RY).sum(axis=1) + e1 = (e1 * RZ).sum(axis=1) + e2 = np.cross(e0, e1) + + self._e1 = e1 + self._e2 = e2 + + self.set_field_parameter("A", A) + self.set_field_parameter("B", B) + self.set_field_parameter("C", C) + self.set_field_parameter("e0", e0) + self.set_field_parameter("e1", e1) + self.set_field_parameter("e2", e2) + + def _get_bbox(self): + """ + Get the bounding box for the ellipsoid. NOTE that in this case + it is not the *minimum* bounding box. 
+ """ + radius = self.ds.arr(np.max([self._A, self._B, self._C]), "code_length") + return -radius + self.center, radius + self.center diff --git a/yt/data_objects/unstructured_mesh.py b/yt/data_objects/unstructured_mesh.py index 54212d7cb8a..54dd2d49a1a 100644 --- a/yt/data_objects/unstructured_mesh.py +++ b/yt/data_objects/unstructured_mesh.py @@ -1,7 +1,7 @@ import numpy as np import yt.geometry.particle_deposit as particle_deposit -from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from yt.funcs import mylog from yt.utilities.exceptions import YTParticleDepositionNotImplemented from yt.utilities.lib.mesh_utilities import fill_fcoords, fill_fwidths diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 87ebddad7a9..751eca3bb35 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -13,7 +13,6 @@ import numpy as np -from yt.data_objects.data_containers import YTSelectionContainer from yt.data_objects.static_output import Dataset from yt.frontends.halo_catalog.data_structures import ( HaloCatalogFile, @@ -23,6 +22,7 @@ from yt.units import Mpc from yt.utilities.cython_fortran_utils import FortranFile +from ...data_objects.selection_objects.base_objects import YTSelectionContainer from .definitions import HEADER_ATTRIBUTES from .fields import AdaptaHOPFieldInfo diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index dc6efe24325..e23864c5347 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -5,7 +5,7 @@ import numpy as np -from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from yt.data_objects.static_output import ParticleDataset from yt.frontends.gadget.data_structures import _fix_unit_ordering from yt.frontends.gadget_fof.fields import GadgetFOFFieldInfo, GadgetFOFHaloFieldInfo diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index ae5d3979ce4..1ca3c68a4bb 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -282,10 +282,11 @@ def _ortho_pixelize( self, data_source, field, bounds, size, antialias, dim, periodic ): from yt.data_objects.construction_data_containers import YTParticleProj - from yt.data_objects.selection_data_containers import YTSlice from yt.frontends.sph.data_structures import ParticleDataset from yt.frontends.stream.data_structures import StreamParticlesDataset + from ...data_objects.selection_objects.slices import YTSlice + # We should be using fcoords field = data_source._determine_fields(field)[0] finfo = data_source.ds.field_info[field] diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index cbb6979b5bc..b8499c5b978 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -14,7 +14,7 @@ from yt.config import ytcfg from yt.convenience import load, simulation -from yt.data_objects.selection_data_containers import YTRegion +from yt.data_objects.selection_objects.region import YTRegion from yt.data_objects.static_output import Dataset from yt.frontends.ytdata.api import save_as_dataset from yt.units.yt_array import YTArray, YTQuantity diff --git 
a/yt/utilities/particle_generator.py b/yt/utilities/particle_generator.py index 30e81e3f877..e73f1fa571b 100644 --- a/yt/utilities/particle_generator.py +++ b/yt/utilities/particle_generator.py @@ -365,7 +365,7 @@ def __init__( ---------- ds : `Dataset` The dataset which will serve as the base for these particles. - data_source : `yt.data_objects.data_containers.YTSelectionContainer` + data_source : `yt.data_objects.selection_objects.base_objects.YTSelectionContainer` The data source containing the density field. num_particles : int The number of particles to be generated diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index d57f423edb8..9ffea7f4a97 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -8,7 +8,7 @@ from yt.data_objects.data_containers import YTDataContainer from yt.data_objects.level_sets.clump_handling import Clump -from yt.data_objects.selection_data_containers import YTCutRegion +from yt.data_objects.selection_objects.cut_region import YTCutRegion from yt.data_objects.static_output import Dataset from yt.frontends.ytdata.data_structures import YTClumpContainer from yt.funcs import iterable, mylog, validate_width_tuple diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 44976a3c2c8..fc78f43f85f 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -153,7 +153,7 @@ class PlotWindow(ImagePlotContainer): Parameters ---------- - data_source : :class:`yt.data_objects.selection_data_containers.YTSelectionContainer2D` + data_source : :class:`yt.data_objects.selection_objects.base_objects.YTSelectionContainer2D` This is the source to be pixelized, which can be a projection, slice, or a cutting plane. 
bounds : sequence of floats diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index bec52d51dfa..82cc0c6b89c 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -8,7 +8,6 @@ import matplotlib import numpy as np -from yt.data_objects.data_containers import YTSelectionContainer from yt.data_objects.profiles import create_profile, sanitize_field_tuple_keys from yt.data_objects.static_output import Dataset from yt.frontends.ytdata.data_structures import YTProfileDataset @@ -16,6 +15,7 @@ from yt.utilities.exceptions import YTNotInsideNotebook from yt.utilities.logger import ytLogger as mylog +from ..data_objects.selection_objects.base_objects import YTSelectionContainer from .base_plot_types import ImagePlotMPL, PlotMPL from .plot_container import ( ImagePlotContainer, diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 4a073a9d3f1..95a5a06c09a 100644 --- a/yt/visualization/volume_rendering/utils.py +++ b/yt/visualization/volume_rendering/utils.py @@ -1,6 +1,6 @@ import numpy as np -from yt.data_objects.data_containers import YTSelectionContainer3D +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer3D from yt.data_objects.static_output import Dataset from yt.utilities.lib import bounding_volume_hierarchy from yt.utilities.lib.image_samplers import ( From ebe37806966269d46c40faa61cd46694972b70cb Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 7 Aug 2020 14:34:21 -0500 Subject: [PATCH 342/653] Missed these in my merge conflict fixing --- yt/data_objects/selection_objects/base_objects.py | 7 ++++--- yt/data_objects/selection_objects/ray.py | 2 +- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/base_objects.py index 5eb82b2fd96..0d8718a8539 100644 --- a/yt/data_objects/selection_objects/base_objects.py +++ b/yt/data_objects/selection_objects/base_objects.py @@ -251,11 +251,12 @@ def _generate_fields(self, fields_to_generate): raise YTDimensionalityError(fi.dimensions, dimensions) fi.units = units self.field_data[field] = self.ds.arr(fd, units) - msg = ( + mylog.warning( "Field %s was added without specifying units, " - "assuming units are %s" + "assuming units are %s", + fi.name, + units, ) - mylog.warning(msg % (fi.name, units)) try: fd.convert_to_units(fi.units) except AttributeError: diff --git a/yt/data_objects/selection_objects/ray.py b/yt/data_objects/selection_objects/ray.py index 0bdf7430e3a..d799081ae56 100644 --- a/yt/data_objects/selection_objects/ray.py +++ b/yt/data_objects/selection_objects/ray.py @@ -176,7 +176,7 @@ def __init__( if (self.start_point < self.ds.domain_left_edge).any() or ( self.end_point > self.ds.domain_right_edge ).any(): - mylog.warn( + mylog.warning( "Ray start or end is outside the domain. " + "Returned data will only be for the ray section inside the domain." 
) From f363df66f7475d3f9d248c36388c1035860c8499 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 7 Aug 2020 15:10:09 -0500 Subject: [PATCH 343/653] Move index sub objects to index_subobjects --- yt/data_objects/api.py | 11 +---------- yt/data_objects/index_subobjects/__init__.py | 0 yt/data_objects/{ => index_subobjects}/grid_patch.py | 0 .../{ => index_subobjects}/octree_subset.py | 0 .../{ => index_subobjects}/particle_container.py | 0 .../{ => index_subobjects}/unstructured_mesh.py | 0 yt/frontends/_skeleton/data_structures.py | 2 +- yt/frontends/amrvac/data_structures.py | 2 +- yt/frontends/art/data_structures.py | 2 +- yt/frontends/artio/data_structures.py | 2 +- yt/frontends/athena/data_structures.py | 2 +- yt/frontends/athena_pp/data_structures.py | 4 ++-- yt/frontends/boxlib/data_structures.py | 2 +- yt/frontends/chombo/data_structures.py | 2 +- yt/frontends/enzo/data_structures.py | 2 +- yt/frontends/enzo_p/data_structures.py | 2 +- yt/frontends/exodus_ii/data_structures.py | 2 +- yt/frontends/fits/data_structures.py | 2 +- yt/frontends/flash/data_structures.py | 2 +- yt/frontends/gamer/data_structures.py | 2 +- yt/frontends/gdf/data_structures.py | 2 +- yt/frontends/moab/data_structures.py | 2 +- yt/frontends/open_pmd/data_structures.py | 2 +- yt/frontends/ramses/data_structures.py | 2 +- yt/frontends/stream/data_structures.py | 9 ++++++--- yt/frontends/ytdata/data_structures.py | 2 +- yt/geometry/coordinates/cartesian_coordinates.py | 2 +- yt/geometry/particle_geometry_handler.py | 2 +- 28 files changed, 29 insertions(+), 35 deletions(-) create mode 100644 yt/data_objects/index_subobjects/__init__.py rename yt/data_objects/{ => index_subobjects}/grid_patch.py (100%) rename yt/data_objects/{ => index_subobjects}/octree_subset.py (100%) rename yt/data_objects/{ => index_subobjects}/particle_container.py (100%) rename yt/data_objects/{ => index_subobjects}/unstructured_mesh.py (100%) diff --git a/yt/data_objects/api.py b/yt/data_objects/api.py index 4dca4a02880..8b137891791 100644 --- a/yt/data_objects/api.py +++ b/yt/data_objects/api.py @@ -1,10 +1 @@ -from . 
import construction_data_containers as __cdc, selection_data_containers as __sdc -from .analyzer_objects import AnalysisTask, analysis_task -from .data_containers import data_object_registry -from .grid_patch import AMRGridPatch -from .image_array import ImageArray -from .octree_subset import OctreeSubset -from .particle_filters import add_particle_filter, particle_filter -from .profiles import ParticleProfile, Profile1D, Profile2D, Profile3D, create_profile -from .static_output import Dataset -from .time_series import DatasetSeries, DatasetSeriesObject + diff --git a/yt/data_objects/index_subobjects/__init__.py b/yt/data_objects/index_subobjects/__init__.py new file mode 100644 index 00000000000..e69de29bb2d diff --git a/yt/data_objects/grid_patch.py b/yt/data_objects/index_subobjects/grid_patch.py similarity index 100% rename from yt/data_objects/grid_patch.py rename to yt/data_objects/index_subobjects/grid_patch.py diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/index_subobjects/octree_subset.py similarity index 100% rename from yt/data_objects/octree_subset.py rename to yt/data_objects/index_subobjects/octree_subset.py diff --git a/yt/data_objects/particle_container.py b/yt/data_objects/index_subobjects/particle_container.py similarity index 100% rename from yt/data_objects/particle_container.py rename to yt/data_objects/index_subobjects/particle_container.py diff --git a/yt/data_objects/unstructured_mesh.py b/yt/data_objects/index_subobjects/unstructured_mesh.py similarity index 100% rename from yt/data_objects/unstructured_mesh.py rename to yt/data_objects/index_subobjects/unstructured_mesh.py diff --git a/yt/frontends/_skeleton/data_structures.py b/yt/frontends/_skeleton/data_structures.py index d0e6bf06000..98e0c026f9b 100644 --- a/yt/frontends/_skeleton/data_structures.py +++ b/yt/frontends/_skeleton/data_structures.py @@ -3,7 +3,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 30fde503bde..5aa99b40147 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -13,7 +13,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import mylog, setdefaultattr from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index b2bb623a5c2..ae2e264bf67 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -6,7 +6,7 @@ import numpy as np import yt.utilities.fortran_utils as fpu -from yt.data_objects.octree_subset import OctreeSubset +from yt.data_objects.index_subobjects.octree_subset import OctreeSubset from yt.data_objects.particle_unions import ParticleUnion from yt.data_objects.static_output import Dataset, ParticleFile from yt.frontends.art.definitions import ( diff --git a/yt/frontends/artio/data_structures.py b/yt/frontends/artio/data_structures.py index 5a501a71ca7..962353ad9fb 100644 --- a/yt/frontends/artio/data_structures.py +++ b/yt/frontends/artio/data_structures.py @@ -6,7 +6,7 @@ import yt.geometry.particle_deposit as particle_deposit from 
yt.data_objects.field_data import YTFieldData -from yt.data_objects.octree_subset import OctreeSubset +from yt.data_objects.index_subobjects.octree_subset import OctreeSubset from yt.data_objects.particle_unions import ParticleUnion from yt.data_objects.static_output import Dataset from yt.frontends.artio import _artio_caller diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index fbd8a3241de..859d0488733 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -3,7 +3,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import ensure_tuple, mylog, sglob from yt.geometry.geometry_handler import YTDataChunk diff --git a/yt/frontends/athena_pp/data_structures.py b/yt/frontends/athena_pp/data_structures.py index f41ee1912cd..116ad72218a 100644 --- a/yt/frontends/athena_pp/data_structures.py +++ b/yt/frontends/athena_pp/data_structures.py @@ -4,9 +4,9 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.unstructured_mesh import SemiStructuredMesh from yt.data_objects.static_output import Dataset -from yt.data_objects.unstructured_mesh import SemiStructuredMesh from yt.funcs import ensure_tuple, get_pbar, mylog from yt.geometry.geometry_handler import YTDataChunk from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index b4ceeacfd55..7110aaa4d4e 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -7,7 +7,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import ensure_tuple, mylog, setdefaultattr from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/chombo/data_structures.py b/yt/frontends/chombo/data_structures.py index 7a9014c09f5..9999e435759 100644 --- a/yt/frontends/chombo/data_structures.py +++ b/yt/frontends/chombo/data_structures.py @@ -4,7 +4,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import mylog, setdefaultattr from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/enzo/data_structures.py b/yt/frontends/enzo/data_structures.py index c5ba83655b8..8874c5018f6 100644 --- a/yt/frontends/enzo/data_structures.py +++ b/yt/frontends/enzo/data_structures.py @@ -8,7 +8,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.fields.field_info_container import NullFunc from yt.frontends.enzo.misc import cosmology_get_units diff --git a/yt/frontends/enzo_p/data_structures.py b/yt/frontends/enzo_p/data_structures.py index 3106b201870..87159395c6b 100644 --- a/yt/frontends/enzo_p/data_structures.py +++ b/yt/frontends/enzo_p/data_structures.py @@ -4,7 +4,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from 
yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.fields.field_info_container import NullFunc from yt.frontends.enzo.misc import cosmology_get_units diff --git a/yt/frontends/exodus_ii/data_structures.py b/yt/frontends/exodus_ii/data_structures.py index 3587e2888f6..08aedbae5ec 100644 --- a/yt/frontends/exodus_ii/data_structures.py +++ b/yt/frontends/exodus_ii/data_structures.py @@ -1,8 +1,8 @@ import numpy as np +from yt.data_objects.index_subobjects.unstructured_mesh import UnstructuredMesh from yt.data_objects.static_output import Dataset from yt.data_objects.unions import MeshUnion -from yt.data_objects.unstructured_mesh import UnstructuredMesh from yt.funcs import setdefaultattr from yt.geometry.unstructured_mesh_handler import UnstructuredIndex from yt.utilities.file_handler import NetCDF4FileHandler, warn_netcdf diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index 72b6f039590..41beea5c786 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -9,7 +9,7 @@ import numpy.core.defchararray as np_char from yt.config import ytcfg -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import ensure_list, issue_deprecation_warning, mylog, setdefaultattr from yt.geometry.geometry_handler import YTDataChunk diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 2af37394ebe..46648192c32 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -3,7 +3,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset, ParticleFile, validate_index_order from yt.funcs import mylog, setdefaultattr from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index e45aa626315..c353671c988 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -3,7 +3,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import mylog, setdefaultattr from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index 859b5b7e7b9..dea9b6f5172 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -3,7 +3,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.funcs import ensure_tuple, just_one, setdefaultattr from yt.geometry.grid_geometry_handler import GridIndex diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index 6c12c925e52..974eaedeb47 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -3,8 +3,8 @@ import numpy as np +from yt.data_objects.index_subobjects.unstructured_mesh import SemiStructuredMesh from yt.data_objects.static_output import Dataset -from yt.data_objects.unstructured_mesh 
import SemiStructuredMesh from yt.funcs import setdefaultattr from yt.geometry.unstructured_mesh_handler import UnstructuredIndex from yt.utilities.file_handler import HDF5FileHandler diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index b63ce96da4b..f1e1b073333 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -6,7 +6,7 @@ import numpy as np -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.static_output import Dataset from yt.data_objects.time_series import DatasetSeries from yt.frontends.open_pmd.fields import OpenPMDFieldInfo diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index c723de81c1b..64fbb5fa1f6 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -6,7 +6,7 @@ import numpy as np from yt.arraytypes import blankRecordArray -from yt.data_objects.octree_subset import OctreeSubset +from yt.data_objects.index_subobjects.octree_subset import OctreeSubset from yt.data_objects.particle_filters import add_particle_filter from yt.data_objects.static_output import Dataset from yt.funcs import mylog, setdefaultattr diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 1270658d5a9..50161c1ce75 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -9,12 +9,15 @@ import numpy as np from yt.data_objects.field_data import YTFieldData -from yt.data_objects.grid_patch import AMRGridPatch -from yt.data_objects.octree_subset import OctreeSubset +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.octree_subset import OctreeSubset +from yt.data_objects.index_subobjects.unstructured_mesh import ( + SemiStructuredMesh, + UnstructuredMesh, +) from yt.data_objects.particle_unions import ParticleUnion from yt.data_objects.static_output import Dataset, ParticleFile from yt.data_objects.unions import MeshUnion -from yt.data_objects.unstructured_mesh import SemiStructuredMesh, UnstructuredMesh from yt.frontends.exodus_ii.util import get_num_pseudo_dims from yt.frontends.sph.data_structures import SPHParticleIndex from yt.funcs import ensure_list, issue_deprecation_warning, iterable diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index 57bd7c1673a..779e40c89d7 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -6,7 +6,7 @@ import numpy as np from yt.data_objects.data_containers import GenerationInProgress -from yt.data_objects.grid_patch import AMRGridPatch +from yt.data_objects.index_subobjects.grid_patch import AMRGridPatch from yt.data_objects.particle_unions import ParticleUnion from yt.data_objects.profiles import ( Profile1DFromDataset, diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 32b8f0d71a4..5d6cc4bdbc8 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -1,6 +1,6 @@ import numpy as np -from yt.data_objects.unstructured_mesh import SemiStructuredMesh +from yt.data_objects.index_subobjects.unstructured_mesh import SemiStructuredMesh from yt.funcs import mylog from yt.units.yt_array import YTArray, uconcatenate, uvstack from 
yt.utilities.lib.pixelization_routines import ( diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 449d7bc7cd1..d9c6407e9ad 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -6,7 +6,7 @@ import numpy as np -from yt.data_objects.particle_container import ParticleContainer +from yt.data_objects.index_subobjects.particle_container import ParticleContainer from yt.funcs import get_pbar, only_on_root from yt.geometry.geometry_handler import Index, YTDataChunk from yt.geometry.particle_oct_container import ParticleBitmap From 1b5883ae5a12851f89a46c7b9c00ddc1aa79006e Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 7 Aug 2020 15:19:16 -0500 Subject: [PATCH 344/653] Avoid circular import --- .../selection_objects/base_objects.py | 21 ++++++++++++++++++- 1 file changed, 20 insertions(+), 1 deletion(-) diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/base_objects.py index 0d8718a8539..8204bb55821 100644 --- a/yt/data_objects/selection_objects/base_objects.py +++ b/yt/data_objects/selection_objects/base_objects.py @@ -11,7 +11,6 @@ from yt.data_objects.data_containers import YTDataContainer from yt.data_objects.derived_quantities import DerivedQuantityCollection from yt.data_objects.field_data import YTFieldData -from yt.data_objects.selection_objects.boolean_operations import YTBooleanContainer from yt.fields.field_exceptions import NeedsGridType from yt.funcs import ensure_list, fix_axis, validate_width_tuple from yt.geometry.selection_routines import compose_selector @@ -282,11 +281,19 @@ def __or__(self, other): if self.ds is not other.ds: raise YTBooleanObjectsWrongDataset() # Should maybe do something with field parameters here + from yt.data_objects.selection_objects.boolean_operations import ( + YTBooleanContainer, + ) + return YTBooleanContainer("OR", self, other, ds=self.ds) def __invert__(self): # ~obj asel = yt.geometry.selection_routines.AlwaysSelector(self.ds) + from yt.data_objects.selection_objects.boolean_operations import ( + YTBooleanContainer, + ) + return YTBooleanContainer("NOT", self, asel, ds=self.ds) def __xor__(self, other): @@ -294,6 +301,10 @@ def __xor__(self, other): raise YTBooleanObjectError(other) if self.ds is not other.ds: raise YTBooleanObjectsWrongDataset() + from yt.data_objects.selection_objects.boolean_operations import ( + YTBooleanContainer, + ) + return YTBooleanContainer("XOR", self, other, ds=self.ds) def __and__(self, other): @@ -301,6 +312,10 @@ def __and__(self, other): raise YTBooleanObjectError(other) if self.ds is not other.ds: raise YTBooleanObjectsWrongDataset() + from yt.data_objects.selection_objects.boolean_operations import ( + YTBooleanContainer, + ) + return YTBooleanContainer("AND", self, other, ds=self.ds) def __add__(self, other): @@ -311,6 +326,10 @@ def __sub__(self, other): raise YTBooleanObjectError(other) if self.ds is not other.ds: raise YTBooleanObjectsWrongDataset() + from yt.data_objects.selection_objects.boolean_operations import ( + YTBooleanContainer, + ) + return YTBooleanContainer("NEG", self, other, ds=self.ds) @contextmanager From 0275490ce90b1ffde2404cc87c9c6cc76c166539 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 7 Aug 2020 23:02:30 +0200 Subject: [PATCH 345/653] revert 'proposal' --- .github/mergeable.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/mergeable.yml b/.github/mergeable.yml index 
f22ba50ba63..0fa89860041 100644 --- a/.github/mergeable.yml +++ b/.github/mergeable.yml @@ -9,5 +9,5 @@ mergeable: message: "WIP pull requests can't be merged." - do: label must_include: - regex: 'bug|enhancement|new feature|docs|infrastructure|dead code|refactor|proposal' + regex: 'bug|enhancement|new feature|docs|infrastructure|dead code|refactor' message: "Please label this pull request with one of: bug, enhancement, new feature, docs or infrastructure." From 16be12190cb5a30c00b23ecb6c5f89724159f36c Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 7 Aug 2020 17:02:05 -0500 Subject: [PATCH 346/653] Update yt/utilities/object_registries.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/utilities/object_registries.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/utilities/object_registries.py b/yt/utilities/object_registries.py index 8c7018501e6..3c28e26238d 100644 --- a/yt/utilities/object_registries.py +++ b/yt/utilities/object_registries.py @@ -1,4 +1,5 @@ -# These are some of the data object registries that are used in different places in the code. Not all of the self-registering objects are included in these. +# These are some of the data object registries that are used in different places in the code. +# Not all of the self-registering objects are included in these. analysis_task_registry = {} data_object_registry = {} From 24463d03619bc028724deb646cce1097fa91f885 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 7 Aug 2020 17:21:01 -0500 Subject: [PATCH 347/653] Fix flake8 --- yt/utilities/object_registries.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/utilities/object_registries.py b/yt/utilities/object_registries.py index 3c28e26238d..f54c0dc2fef 100644 --- a/yt/utilities/object_registries.py +++ b/yt/utilities/object_registries.py @@ -1,5 +1,6 @@ -# These are some of the data object registries that are used in different places in the code. -# Not all of the self-registering objects are included in these. +# These are some of the data object registries that are used in different +# places in the code. Not all of the self-registering objects are included in +# these. analysis_task_registry = {} data_object_registry = {} From db88a62b0ed04d9d60051e2eac9db66d1cd40e4c Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 8 Aug 2020 19:29:55 +0200 Subject: [PATCH 348/653] Add flynt action --- .github/workflows/slash-command-dispatch.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/slash-command-dispatch.yml b/.github/workflows/slash-command-dispatch.yml index 73b71c17ef2..e98282ecdf1 100644 --- a/.github/workflows/slash-command-dispatch.yml +++ b/.github/workflows/slash-command-dispatch.yml @@ -16,4 +16,5 @@ jobs: rebase isort black + flynt repository: yt-project/slash-command-processor From 3482cd277e43bb6c91a875440448f7ade55e57ad Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sat, 8 Aug 2020 20:59:40 +0200 Subject: [PATCH 349/653] Add instruction in PR checklist --- .github/PULL_REQUEST_TEMPLATE.md | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 997c86e4bba..143f27036e3 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -23,6 +23,7 @@ detail. Why is this change required? What problem does it solve?--> - [ ] pass `black --check yt/` - [ ] pass `isort . 
--check --diff` - [ ] pass `flake8 yt/` +- [ ] pass `flynt --fail-on-change --dry-run yt/` - [ ] New features are documented, with docstrings and narrative docs - [ ] Adds a test for any bugs fixed. Adds tests for new features. From 47d8c2beae201a765c06f9da02fc2855daf32775 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 9 Aug 2020 13:46:30 +0200 Subject: [PATCH 350/653] Update lint_requirements.txt --- tests/lint_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index 0db50b0c78f..31ef5238fe7 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -5,4 +5,4 @@ pyflakes==2.2.0 isort==5.2.1 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear --e git+https://github.com/cphyc/flynt.git@dry-run-mode#egg=flynt +-e git+https://github.com/ikamensh/flynt.git@mastere#egg=flynt From 36144fb255bfc475574cf9ad12175628cc82b3b9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 9 Aug 2020 13:46:52 +0200 Subject: [PATCH 351/653] Update lint_requirements.txt --- tests/lint_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index 31ef5238fe7..b8e765f7d6d 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -5,4 +5,4 @@ pyflakes==2.2.0 isort==5.2.1 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear --e git+https://github.com/ikamensh/flynt.git@mastere#egg=flynt +-e git+https://github.com/ikamensh/flynt.git@master#egg=flynt From f902c8fccb3928476cadadf00c55bd43381bba72 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 9 Aug 2020 19:54:29 +0200 Subject: [PATCH 352/653] Update flynt to latest relese --- tests/lint_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index b8e765f7d6d..6c07fac4c1a 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -5,4 +5,4 @@ pyflakes==2.2.0 isort==5.2.1 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear --e git+https://github.com/ikamensh/flynt.git@master#egg=flynt +flynt==0.50 From aca7c0818d73eb69590d967af8ac99ba3945ab64 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Sun, 9 Aug 2020 16:01:23 -0500 Subject: [PATCH 353/653] Fix test that relied on string class names --- yt/data_objects/tests/test_disks.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_disks.py b/yt/data_objects/tests/test_disks.py index e76c952cf51..b4eec7b6966 100644 --- a/yt/data_objects/tests/test_disks.py +++ b/yt/data_objects/tests/test_disks.py @@ -51,7 +51,7 @@ def test_bad_disk_input(): desired = ( "Expected an object of 'yt.data_objects.static_output.Dataset' " "type, received " - "'yt.data_objects.selection_data_containers.YTRegion'" + "'yt.data_objects.selection_objects.region.YTRegion'" ) assert_equal(str(ex.exception), desired) From d6ee8221b0ce605700af4479af79a9e5d262e46f Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Sun, 9 Aug 2020 16:02:39 -0500 Subject: [PATCH 354/653] Rename *back* to GenerationInProgress --- yt/data_objects/selection_objects/base_objects.py | 6 +++--- yt/frontends/ytdata/data_structures.py | 4 ++-- yt/utilities/exceptions.py | 4 ++-- 3 files changed, 7 insertions(+), 7 deletions(-) diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/base_objects.py 
index 8204bb55821..22d3fb52a62 100644 --- a/yt/data_objects/selection_objects/base_objects.py +++ b/yt/data_objects/selection_objects/base_objects.py @@ -22,7 +22,7 @@ YTDimensionalityError, YTFieldUnitError, YTFieldUnitParseError, - YTGenerationInProgress, + GenerationInProgress, ) from yt.utilities.lib.marching_cubes import march_cubes_grid, march_cubes_grid_flux from yt.utilities.logger import ytLogger as mylog @@ -173,7 +173,7 @@ def get_data(self, fields=None): if len(fields_to_get) == 0 and len(fields_to_generate) == 0: return elif self._locked: - raise YTGenerationInProgress(fields) + raise GenerationInProgress(fields) # Track which ones we want in the end ofields = set(list(self.field_data.keys()) + fields_to_get + fields_to_generate) # At this point, we want to figure out *all* our dependencies. @@ -270,7 +270,7 @@ def _generate_fields(self, fields_to_generate): except UnitParseError: raise YTFieldUnitParseError(fi) self.field_data[field] = fd - except YTGenerationInProgress as gip: + except GenerationInProgress as gip: for f in gip.fields: if f not in fields_to_generate: fields_to_generate.append(f) diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index a75f87ef19c..fe3c203f7bb 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -20,7 +20,7 @@ from yt.units import dimensions from yt.units.unit_registry import UnitRegistry from yt.units.yt_array import YTQuantity, uconcatenate -from yt.utilities.exceptions import YTFieldTypeNotFound, YTGenerationInProgress +from yt.utilities.exceptions import YTFieldTypeNotFound, GenerationInProgress from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5py from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only @@ -624,7 +624,7 @@ def get_data(self, fields=None): if len(fields_to_get) == 0 and len(fields_to_generate) == 0: return elif self._locked: - raise YTGenerationInProgress(fields) + raise GenerationInProgress(fields) # Track which ones we want in the end ofields = set(list(self.field_data.keys()) + fields_to_get + fields_to_generate) # At this point, we want to figure out *all* our dependencies. diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index f10a615bb37..98eaeafc956 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -874,7 +874,7 @@ def __str__(self): return msg -class YTGenerationInProgress(Exception): +class GenerationInProgress(Exception): def __init__(self, fields): self.fields = fields - super(YTGenerationInProgress, self).__init__() + super(GenerationInProgress, self).__init__() From 2dc0898c07fec02cc2877e73f26874d36c22d8b3 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Sun, 9 Aug 2020 16:03:36 -0500 Subject: [PATCH 355/653] Avoid too many ... 
relative imports --- yt/frontends/adaptahop/data_structures.py | 2 +- yt/geometry/coordinates/cartesian_coordinates.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 751eca3bb35..f465e5b54ba 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -22,7 +22,7 @@ from yt.units import Mpc from yt.utilities.cython_fortran_utils import FortranFile -from ...data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from .definitions import HEADER_ATTRIBUTES from .fields import AdaptaHOPFieldInfo diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 4f109f9db69..4506a711093 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -285,7 +285,7 @@ def _ortho_pixelize( from yt.frontends.sph.data_structures import ParticleDataset from yt.frontends.stream.data_structures import StreamParticlesDataset - from ...data_objects.selection_objects.slices import YTSlice + from yt.data_objects.selection_objects.slices import YTSlice # We should be using fcoords field = data_source._determine_fields(field)[0] From 551129974aa6a1dc3b26c4f8c87104c93b7c9408 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Sun, 9 Aug 2020 22:12:33 -0500 Subject: [PATCH 356/653] sort the imports --- yt/frontends/ytdata/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/ytdata/data_structures.py b/yt/frontends/ytdata/data_structures.py index fe3c203f7bb..685564590ad 100644 --- a/yt/frontends/ytdata/data_structures.py +++ b/yt/frontends/ytdata/data_structures.py @@ -20,7 +20,7 @@ from yt.units import dimensions from yt.units.unit_registry import UnitRegistry from yt.units.yt_array import YTQuantity, uconcatenate -from yt.utilities.exceptions import YTFieldTypeNotFound, GenerationInProgress +from yt.utilities.exceptions import GenerationInProgress, YTFieldTypeNotFound from yt.utilities.logger import ytLogger as mylog from yt.utilities.on_demand_imports import _h5py as h5py from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_root_only From ec7d4e90a9b4109106e6c7bb0802d630c7c195dd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 08:29:46 +0200 Subject: [PATCH 357/653] print versions in check mode for black, isort and flynt --- .travis.yml | 12 +++++++++--- 1 file changed, 9 insertions(+), 3 deletions(-) diff --git a/.travis.yml b/.travis.yml index 9c3b7d7f570..7c1c5688b31 100644 --- a/.travis.yml +++ b/.travis.yml @@ -81,17 +81,23 @@ jobs: - stage: Lint name: "flake8" python: 3.6 - script: flake8 yt/ + script: | + flake8 --version + flake8 yt/ - stage: Lint name: "isort" python: 3.6 - script: isort . --check --diff + script: | + isort --version + isort . 
--check --diff - stage: Lint name: "black" python: 3.6 - script: black --check yt/ + script: | + black --version + black --check yt/ - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" From 623a61b23bc4bd5f783e1ec6c44a22871c2b2b21 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 10:53:03 +0200 Subject: [PATCH 358/653] remove a test that's been disabled for 7 yrs --- yt/utilities/tests/test_amr_kdtree.py | 44 +-------------------------- 1 file changed, 1 insertion(+), 43 deletions(-) diff --git a/yt/utilities/tests/test_amr_kdtree.py b/yt/utilities/tests/test_amr_kdtree.py index e76cef04915..968b706bce3 100644 --- a/yt/utilities/tests/test_amr_kdtree.py +++ b/yt/utilities/tests/test_amr_kdtree.py @@ -2,49 +2,7 @@ import numpy as np -import yt.utilities.flagging_methods as fm -import yt.utilities.initial_conditions as ic -from yt.frontends.stream.api import load_uniform_grid, refine_amr -from yt.testing import assert_almost_equal, assert_equal, fake_amr_ds -from yt.utilities.amr_kdtree.api import AMRKDTree - - -def test_amr_kdtree_coverage(): - return # TESTDISABLED - domain_dims = (32, 32, 32) - data = np.zeros(domain_dims) + 0.25 - fo = [ic.CoredSphere(0.05, 0.3, [0.7, 0.4, 0.75], {"density": (0.25, 100.0)})] - rc = [fm.flagging_method_registry["overdensity"](8.0)] - ug = load_uniform_grid({"density": data}, domain_dims, 1.0) - ds = refine_amr(ug, rc, fo, 5) - - kd = AMRKDTree(ds) - - volume = kd.count_volume() - assert_equal(volume, np.prod(ds.domain_right_edge - ds.domain_left_edge)) - - cells = kd.count_cells() - true_cells = ds.all_data().quantities["TotalQuantity"]("Ones")[0] - assert_equal(cells, true_cells) - - # This largely reproduces the AMRKDTree.tree.check_tree() functionality - tree_ok = True - for node in kd.tree.trunk.depth_traverse(): - if node.grid is None: - continue - grid = ds.index.grids[node.grid - kd._id_offset] - dds = grid.dds - gle = grid.LeftEdge - nle = node.get_left_edge() - nre = node.get_right_edge() - li = np.rint((nle - gle) / dds).astype("int32") - ri = np.rint((nre - gle) / dds).astype("int32") - dims = (ri - li).astype("int32") - tree_ok *= np.all(grid.LeftEdge <= nle) - tree_ok *= np.all(grid.RightEdge >= nre) - tree_ok *= np.all(dims > 0) - - assert_equal(True, tree_ok) +from yt.testing import assert_almost_equal, fake_amr_ds def test_amr_kdtree_set_fields(): From 6d879f741b8ddd8f88f066dc16a72a045f3acfa5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 12:02:19 +0200 Subject: [PATCH 359/653] make enable_plugin() raise an error instead of a log entry in case no global plugin is found --- yt/funcs.py | 4 ++-- yt/utilities/command_line.py | 5 ++++- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/yt/funcs.py b/yt/funcs.py index 6c0365304c3..a9a3fdae594 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -1117,8 +1117,8 @@ def enable_plugins(pluginfilename=None): _fn = os.path.join(base_prefix, my_plugin_name) break else: - mylog.error("Could not find a global system plugin file.") - return + raise FileNotFoundError("Could not find a global system plugin file.") + if _fn.startswith(old_config_dir): mylog.warning( "Your plugin file is located in a deprecated directory. 
" diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 27fcb2e8079..5492082cb60 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -48,7 +48,10 @@ # loading field plugins for backward compatibility, since this module # used to do "from yt.mods import *" if ytcfg.getboolean("yt", "loadfieldplugins"): - enable_plugins() + try: + enable_plugins() + except FileNotFoundError: + pass _default_colormap = ytcfg.get("yt", "default_colormap") From cc8aa62e6140778d9c3d31687b1d9fb90e6a6966 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Mon, 10 Aug 2020 05:53:51 -0500 Subject: [PATCH 360/653] Manually isort! --- yt/data_objects/selection_objects/base_objects.py | 2 +- yt/frontends/adaptahop/data_structures.py | 2 +- yt/geometry/coordinates/cartesian_coordinates.py | 3 +-- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/base_objects.py index 22d3fb52a62..0fcd31f7092 100644 --- a/yt/data_objects/selection_objects/base_objects.py +++ b/yt/data_objects/selection_objects/base_objects.py @@ -16,13 +16,13 @@ from yt.geometry.selection_routines import compose_selector from yt.units import dimensions as ytdims from yt.utilities.exceptions import ( + GenerationInProgress, YTBooleanObjectError, YTBooleanObjectsWrongDataset, YTDataSelectorNotImplemented, YTDimensionalityError, YTFieldUnitError, YTFieldUnitParseError, - GenerationInProgress, ) from yt.utilities.lib.marching_cubes import march_cubes_grid, march_cubes_grid_flux from yt.utilities.logger import ytLogger as mylog diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index f465e5b54ba..9f4e8af8e1b 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -13,6 +13,7 @@ import numpy as np +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from yt.data_objects.static_output import Dataset from yt.frontends.halo_catalog.data_structures import ( HaloCatalogFile, @@ -22,7 +23,6 @@ from yt.units import Mpc from yt.utilities.cython_fortran_utils import FortranFile -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from .definitions import HEADER_ATTRIBUTES from .fields import AdaptaHOPFieldInfo diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 4506a711093..23e9c6e178a 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -282,11 +282,10 @@ def _ortho_pixelize( self, data_source, field, bounds, size, antialias, dim, periodic ): from yt.data_objects.construction_data_containers import YTParticleProj + from yt.data_objects.selection_objects.slices import YTSlice from yt.frontends.sph.data_structures import ParticleDataset from yt.frontends.stream.data_structures import StreamParticlesDataset - from yt.data_objects.selection_objects.slices import YTSlice - # We should be using fcoords field = data_source._determine_fields(field)[0] finfo = data_source.ds.field_info[field] From 0e15e1941307b7ccc8c7533605f176ab9195e4b8 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Mon, 10 Aug 2020 06:11:28 -0500 Subject: [PATCH 361/653] Attempt to fix some index_subobjects importing --- yt/data_objects/api.py | 10 +++++++++- yt/data_objects/index_subobjects/__init__.py | 2 ++ 2 files changed, 11 insertions(+), 1 deletion(-) diff --git 
a/yt/data_objects/api.py b/yt/data_objects/api.py index 8b137891791..443f647d1f0 100644 --- a/yt/data_objects/api.py +++ b/yt/data_objects/api.py @@ -1 +1,9 @@ - +from . import construction_data_containers as __cdc, selection_data_containers as __sdc +from .analyzer_objects import AnalysisTask, analysis_task +from .data_containers import data_object_registry +from .image_array import ImageArray +from .index_subobjects import AMRGridPatch, OctreeSubset +from .particle_filters import add_particle_filter, particle_filter +from .profiles import ParticleProfile, Profile1D, Profile2D, Profile3D, create_profile +from .static_output import Dataset +from .time_series import DatasetSeries, DatasetSeriesObject diff --git a/yt/data_objects/index_subobjects/__init__.py b/yt/data_objects/index_subobjects/__init__.py index e69de29bb2d..eb8631ee679 100644 --- a/yt/data_objects/index_subobjects/__init__.py +++ b/yt/data_objects/index_subobjects/__init__.py @@ -0,0 +1,2 @@ +from .grid_patch import AMRGridPatch +from .octree_subset import OctreeSubset From 2c2c4340ecc8c8c20faa35c42096709698a7f4e5 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Mon, 10 Aug 2020 13:12:27 +0100 Subject: [PATCH 362/653] Change pass to return. --- yt/geometry/particle_geometry_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index a4ab1a09f2e..626d59ff705 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -47,7 +47,7 @@ def convert(self, unit): def _setup_filenames(self): if hasattr(self, "data_files"): - pass + return template = self.dataset.filename_template ndoms = self.dataset.file_count From c94da75729771f0e7d280cf87928db75ddecedb4 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 14:13:12 +0200 Subject: [PATCH 363/653] issue a deprecation warning at import time, remove entry from documentation --- doc/source/reference/configuration.rst | 1 - yt/__init__.py | 13 +++++++++++++ yt/config.py | 2 +- yt/utilities/command_line.py | 10 +++++----- 4 files changed, 19 insertions(+), 7 deletions(-) diff --git a/doc/source/reference/configuration.rst b/doc/source/reference/configuration.rst index e3331db3506..7d581e19997 100644 --- a/doc/source/reference/configuration.rst +++ b/doc/source/reference/configuration.rst @@ -82,7 +82,6 @@ used internally. * ``coloredlogs`` (default: ``False``): Should logs be colored? * ``default_colormap`` (default: ``arbre``): What colormap should be used by default for yt-produced images? -* ``loadfieldplugins`` (default: ``True``): Do we want to load the plugin file? * ``pluginfilename`` (default ``my_plugins.py``) The name of our plugin file. * ``logfile`` (default: ``False``): Should we output to a log file in the filesystem? diff --git a/yt/__init__.py b/yt/__init__.py index c95f1763248..f10da40a5fa 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -176,3 +176,16 @@ from yt.units.unit_systems import UnitSystem, unit_system_registry _called_from_pytest = False + + +def _check_deprecated_parameters(): + from yt.config import ytcfg + from yt.funcs import issue_deprecation_warning + + if ytcfg.getboolean("yt", "loadfieldplugins"): + issue_deprecation_warning( + "Found deprecated parameter 'loadfieldplugins' parameter in yt rcfile." 
+ ) + + +_check_deprecated_parameters() diff --git a/yt/config.py b/yt/config.py index 09d459a611f..e2a2244fd1d 100644 --- a/yt/config.py +++ b/yt/config.py @@ -24,7 +24,7 @@ parameterfilestore="parameter_files.csv", maximumstoreddatasets="500", skip_dataset_cache="True", - loadfieldplugins="True", + loadfieldplugins="False", pluginfilename="my_plugins.py", parallel_traceback="False", pasteboard_repo="", diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 5492082cb60..03367ee9ada 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -47,11 +47,11 @@ # loading field plugins for backward compatibility, since this module # used to do "from yt.mods import *" -if ytcfg.getboolean("yt", "loadfieldplugins"): - try: - enable_plugins() - except FileNotFoundError: - pass + +try: + enable_plugins() +except FileNotFoundError: + pass _default_colormap = ytcfg.get("yt", "default_colormap") From a6e368f6f4cd5dad5fa4bbc0fb0afd2b5fb23507 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Mon, 10 Aug 2020 11:43:38 -0500 Subject: [PATCH 364/653] [doc] fix syntax errors in loading_data --- doc/source/examining/loading_data.rst | 67 ++++++++++++++------------- 1 file changed, 36 insertions(+), 31 deletions(-) diff --git a/doc/source/examining/loading_data.rst b/doc/source/examining/loading_data.rst index 218c47c8e53..26aba1b6a50 100644 --- a/doc/source/examining/loading_data.rst +++ b/doc/source/examining/loading_data.rst @@ -6,7 +6,7 @@ Loading Data This section contains information on how to load data into yt, as well as some important caveats about different data formats. -:: _loading-sample-data: +.. _loading-sample-data: Sample Data ----------- @@ -40,7 +40,8 @@ This will return a list of possible filenames; more information can be accessed AMRVAC Data ----------- -.. note: +.. note:: + This frontend is brand new and may be subject to rapid change in the near future. @@ -144,7 +145,8 @@ Appropriate errors are thrown for other combinations. * "stretched grids" as defined in AMRVAC have no correspondance in yt, hence will never be supported. -.. note +.. note:: + Ghost cells exist in .dat files but never read by yt. .. _loading-art-data: @@ -247,7 +249,8 @@ Athena Data Athena 4.x VTK data is supported and cared for by John ZuHone. Both uniform grid and SMR datasets are supported. -.. note: +.. note:: + yt also recognizes Fargo3D data written to VTK files as Athena data, but support for Fargo3D data is preliminary. @@ -1278,11 +1281,10 @@ and units. .. code-block:: python - bbox = [[-600.0, 600.0], [-600.0, 600.0], [-600.0, 600.0]] unit_base = { 'length': (1.0, 'kpc'), - 'velocity: (1.0, 'km/s'), + 'velocity': (1.0, 'km/s'), 'mass': (1.0, 'Msun') } @@ -1336,13 +1338,14 @@ of this format: .. code-block:: python - default = ( "Coordinates", - "Velocities", - "ParticleIDs", - "Mass", - ("InternalEnergy", "Gas"), - ("Density", "Gas"), - ("SmoothingLength", "Gas"), + default = ( + "Coordinates", + "Velocities", + "ParticleIDs", + "Mass", + ("InternalEnergy", "Gas"), + ("Density", "Gas"), + ("SmoothingLength", "Gas"), ) This is the default specification used by the Gadget frontend. It means that @@ -1354,14 +1357,15 @@ this: .. 
code-block:: python - my_field_def = ( "Coordinates", - "Velocities", - "ParticleIDs", - ("Metallicity", "Halo"), - "Mass", - ("InternalEnergy", "Gas"), - ("Density", "Gas"), - ("SmoothingLength", "Gas"), + my_field_def = ( + "Coordinates", + "Velocities", + "ParticleIDs", + ("Metallicity", "Halo"), + "Mass", + ("InternalEnergy", "Gas"), + ("Density", "Gas"), + ("SmoothingLength", "Gas"), ) To save time, you can utilize the plugins file for yt and use it to add items @@ -1534,7 +1538,7 @@ data like this: import yt ds = yt.load("InteractingJets/jet_000002") -For simulations without units (i.e., OPT__UNIT = 0), you can supply conversions +For simulations without units (i.e., ``OPT__UNIT = 0``), you can supply conversions for length, time, and mass to ``load`` using the ``units_override`` functionality: @@ -1554,8 +1558,8 @@ data. .. rubric:: Caveats -* GAMER data in raw binary format (i.e., OPT__OUTPUT_TOTAL = C-binary) is not -supported. +* GAMER data in raw binary format (i.e., ``OPT__OUTPUT_TOTAL = "C-binary"``) is not + supported. .. _loading-amr-data: @@ -1812,7 +1816,7 @@ To load multiple meshes, you can do: sl = yt.SlicePlot(ds, 'z', ('all', 'test')) Note that you must respect the field naming convention that fields on the first -mesh will have the type 'connect1', fields on the second will have 'connect2', etc... +mesh will have the type ``connect1``, fields on the second will have ``connect2``, etc... .. rubric:: Caveats @@ -2492,6 +2496,7 @@ It is possible to provide extra arguments to the load function when loading RAMS .. code-block:: python + import yt # Assuming RAMSES' levelmin=6, i.e. the structure is full @@ -2727,11 +2732,11 @@ parameters: If you wish to set the unit system directly, you can do so by using the ``unit_base`` keyword in the load statement. - .. code-block:: python +.. code-block:: python - import yt + import yt - ds = yt.load(filename, unit_base={'length', (1.0, 'Mpc')}) + ds = yt.load(filename, unit_base={'length', (1.0, 'Mpc')}) See the documentation for the :class:`~yt.frontends.tipsy.data_structures.TipsyDataset` class for more @@ -2745,7 +2750,7 @@ use keyword ``cosmology_parameters`` when loading your data set to indicate to yt that it is a cosmological data set. If you do not wish to set any non-default cosmological parameters, you may pass an empty dictionary. - .. code-block:: python +.. 
code-block:: python - import yt - ds = yt.load(filename, cosmology_parameters={}) + import yt + ds = yt.load(filename, cosmology_parameters={}) From 1452aac41a6cd2a291e5e9820e99a30cacaaf304 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 10 Aug 2020 20:35:18 +0200 Subject: [PATCH 365/653] Update yt/fields/xray_emission_fields.py Co-authored-by: Matthew Turk --- yt/fields/xray_emission_fields.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/xray_emission_fields.py b/yt/fields/xray_emission_fields.py index 1321be535d8..900a4cd15bf 100644 --- a/yt/fields/xray_emission_fields.py +++ b/yt/fields/xray_emission_fields.py @@ -83,7 +83,7 @@ class XrayEmissivityIntegrator: def __init__(self, table_type, redshift=0.0, data_dir=None, use_metals=True): filename = _get_data_file(table_type, data_dir=data_dir) - only_on_root(mylog.info, f"Loading emissivity data from {filename}.") + only_on_root(mylog.info, "Loading emissivity data from %s", filename) in_file = h5py.File(filename, mode="r") if "info" in in_file.attrs: only_on_root(mylog.info, parse_h5_attr(in_file, "info")) From af779f108b609e450420b5076ffe7cb28478dce1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 8 Aug 2020 19:48:46 +0200 Subject: [PATCH 366/653] gather all load_* functions in a common namespace revert __init__ changes --- setup.cfg | 1 + yt/__init__.py | 2 +- yt/loaders.py | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 yt/loaders.py diff --git a/setup.cfg b/setup.cfg index ac482fd533c..d92e2bf4753 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,6 +14,7 @@ max-line-length=88 exclude = doc, benchmarks, */api.py, # avoid spurious "unused import" + yt/loaders.py, # avoid spurious "unused import" */__init__.py, # avoid spurious "unused import" */__config__.py, # autogenerated yt/extern, # vendored libraries diff --git a/yt/__init__.py b/yt/__init__.py index c95f1763248..b698641a759 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -164,7 +164,7 @@ communication_system, ) -from yt.convenience import load, simulation +from yt.convenience import load, load_simulation, simulation from yt.utilities.load_sample import load_sample diff --git a/yt/loaders.py b/yt/loaders.py new file mode 100644 index 00000000000..2372e15e0f0 --- /dev/null +++ b/yt/loaders.py @@ -0,0 +1,18 @@ +""" +This module gathers all user-facing functions with a `load_` prefix. 
+ +""" +# note: in the future, functions could be moved here instead +# in which case, this file should be removed from flake8 ignore list in setup.cfg + +# note: simulation() should be renamed load_simulation() +from .convenience import load, simulation +from .frontends.stream.api import ( + load_amr_grids, + load_hexahedral_mesh, + load_octree, + load_particles, + load_uniform_grid, + load_unstructured_mesh, +) +from .utilities import load_sample From f2411932a35d65f455751aeed4413bbcb9c5c4db Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 8 Aug 2020 20:16:52 +0200 Subject: [PATCH 367/653] deprecate yt.simulation in favor of yt.load_simulation --- yt/convenience.py | 14 +++++++++++++- yt/frontends/enzo/simulation_handling.py | 4 ++-- yt/frontends/exodus_ii/simulation_handling.py | 2 +- yt/frontends/gadget/simulation_handling.py | 4 ++-- yt/frontends/owls/simulation_handling.py | 2 +- yt/loaders.py | 3 +-- yt/tests/test_load_errors.py | 13 ++++++++----- yt/utilities/answer_testing/framework.py | 8 +++++--- yt/utilities/answer_testing/utils.py | 4 ++-- yt/visualization/profile_plotter.py | 6 +++--- 10 files changed, 38 insertions(+), 22 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index 13902ac84dc..aaa034593b1 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -85,7 +85,7 @@ def load(fn, *args, **kwargs): raise YTOutputNotIdentified(fn, args, kwargs) -def simulation(fn, simulation_type, find_outputs=False): +def load_simulation(fn, simulation_type, find_outputs=False): """ Load a simulation time series object of the specified simulation type. @@ -122,3 +122,15 @@ def simulation(fn, simulation_type, find_outputs=False): raise YTSimulationNotIdentified(simulation_type) return cls(fn, find_outputs=find_outputs) + + +def simulation(fn, simulation_type, find_outputs=False): + from yt.funcs import issue_deprecation_warning + + issue_deprecation_warning( + "yt.simulation is a deprecated alias for yt.load_simulation" + "and will be removed in a future version of yt." + ) + return load_simulation( + fn=fn, simulation_type=simulation_type, find_outputs=find_outputs + ) diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 49406f99646..992dd8f208f 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -43,7 +43,7 @@ class EnzoSimulation(SimulationTimeSeries): Examples -------- >>> import yt - >>> es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") + >>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") >>> es.get_time_series() >>> for ds in es: ... print(ds.current_time) @@ -205,7 +205,7 @@ def get_time_series( -------- >>> import yt - >>> es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") + >>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), redshift_data=False) >>> for ds in es: diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index bfcee25759b..bb79e708dfd 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -21,7 +21,7 @@ class ExodusIISimulation(DatasetSeries): Examples -------- >>> import yt - >>> sim = yt.simulation("demo_second", "ExodusII") + >>> sim = yt.load_simulation("demo_second", "ExodusII") >>> sim.get_time_series() >>> for ds in sim: ... 
print(ds.current_time) diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 37e80e34a7d..048229dc50d 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -42,7 +42,7 @@ class GadgetSimulation(SimulationTimeSeries): Examples -------- >>> import yt - >>> gs = yt.simulation("my_simulation.par", "Gadget") + >>> gs = yt.load_simulation("my_simulation.par", "Gadget") >>> gs.get_time_series() >>> for ds in gs: ... print(ds.current_time) @@ -190,7 +190,7 @@ def get_time_series( -------- >>> import yt - >>> gs = yt.simulation("my_simulation.par", "Gadget") + >>> gs = yt.load_simulation("my_simulation.par", "Gadget") >>> gs.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr")) diff --git a/yt/frontends/owls/simulation_handling.py b/yt/frontends/owls/simulation_handling.py index 370079b38ee..bbfe75bd452 100644 --- a/yt/frontends/owls/simulation_handling.py +++ b/yt/frontends/owls/simulation_handling.py @@ -26,7 +26,7 @@ class OWLSSimulation(GadgetSimulation): Examples -------- >>> import yt - >>> es = yt.simulation("my_simulation.par", "OWLS") + >>> es = yt.load_simulation("my_simulation.par", "OWLS") >>> es.get_time_series() >>> for ds in es: ... print(ds.current_time) diff --git a/yt/loaders.py b/yt/loaders.py index 2372e15e0f0..e983d4cfe25 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -5,8 +5,7 @@ # note: in the future, functions could be moved here instead # in which case, this file should be removed from flake8 ignore list in setup.cfg -# note: simulation() should be renamed load_simulation() -from .convenience import load, simulation +from .convenience import load, load_simulation from .frontends.stream.api import ( load_amr_grids, load_hexahedral_mesh, diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index 68c7da29ceb..619788b99a8 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -2,8 +2,8 @@ import tempfile from pathlib import Path -from yt.convenience import load, simulation from yt.data_objects.static_output import Dataset +from yt.loaders import load, load_simulation from yt.testing import assert_raises from yt.utilities.exceptions import ( YTAmbiguousDataType, @@ -17,7 +17,10 @@ def test_load_nonexistent_data(): with tempfile.TemporaryDirectory() as tmpdir: assert_raises(FileNotFoundError, load, os.path.join(tmpdir, "not_a_file")) assert_raises( - FileNotFoundError, simulation, os.path.join(tmpdir, "not_a_file"), "Enzo" + FileNotFoundError, + load_simulation, + os.path.join(tmpdir, "not_a_file"), + "Enzo", ) # this one is a design choice: it is preferable to report the most important @@ -25,7 +28,7 @@ def test_load_nonexistent_data(): # simulation_type), so we make sure the error raised is not YTSimulationNotIdentified assert_raises( FileNotFoundError, - simulation, + load_simulation, os.path.join(tmpdir, "not_a_file"), "unregistered_simulation_type", ) @@ -39,13 +42,13 @@ def test_load_unidentified_data(): assert_raises(YTOutputNotIdentified, load, empty_file_path) assert_raises( YTSimulationNotIdentified, - simulation, + load_simulation, tmpdir, "unregistered_simulation_type", ) assert_raises( YTSimulationNotIdentified, - simulation, + load_simulation, empty_file_path, "unregistered_simulation_type", ) diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 25d7315ae22..359dd84ef0a 100644 --- a/yt/utilities/answer_testing/framework.py +++ 
b/yt/utilities/answer_testing/framework.py @@ -22,10 +22,10 @@ from nose.plugins import Plugin from yt.config import ytcfg -from yt.convenience import load, simulation from yt.data_objects.static_output import Dataset from yt.data_objects.time_series import SimulationTimeSeries from yt.funcs import get_pbar +from yt.loaders import load, load_simulation from yt.testing import ( assert_allclose_units, assert_almost_equal, @@ -323,7 +323,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): if file_check: return os.path.isfile(os.path.join(path, sim_fn)) and result_storage is not None try: - simulation(sim_fn, sim_type) + load_simulation(sim_fn, sim_type) except FileNotFoundError: if ytcfg.getboolean("yt", "requires_ds_strict"): if result_storage is not None: @@ -354,7 +354,9 @@ def sim_dir_load(sim_fn, path=None, sim_type="Enzo", find_outputs=False): raise IOError if os.path.exists(sim_fn) or not path: path = "." - return simulation(os.path.join(path, sim_fn), sim_type, find_outputs=find_outputs) + return load_simulation( + os.path.join(path, sim_fn), sim_type, find_outputs=find_outputs + ) class AnswerTestingTest: diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index cbb6979b5bc..d0eb4cd7821 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -13,10 +13,10 @@ import yaml from yt.config import ytcfg -from yt.convenience import load, simulation from yt.data_objects.selection_data_containers import YTRegion from yt.data_objects.static_output import Dataset from yt.frontends.ytdata.api import save_as_dataset +from yt.loaders import load, load_simulation from yt.units.yt_array import YTArray, YTQuantity from yt.visualization import particle_plots, plot_window as pw, profile_plotter from yt.visualization.volume_rendering.scene import Scene @@ -322,7 +322,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): if file_check: return os.path.isfile(os.path.join(path, sim_fn)) try: - simulation(sim_fn, sim_type) + load_simulation(sim_fn, sim_type) except FileNotFoundError: return False return True diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index 87d5e2c2c27..601fddf4842 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -193,7 +193,7 @@ class ProfilePlot: This creates profiles from a time series object. 
- >>> es = yt.simulation("AMRCosmology.enzo", "Enzo") + >>> es = yt.load_simulation("AMRCosmology.enzo", "Enzo") >>> es.get_time_series() >>> profiles = [] @@ -463,8 +463,8 @@ def from_profiles(cls, profiles, labels=None, plot_specs=None, y_log=None): Examples -------- - >>> from yt import simulation - >>> es = simulation("AMRCosmology.enzo", "Enzo") + >>> from yt import load_simulation + >>> es = load_simulation("AMRCosmology.enzo", "Enzo") >>> es.get_time_series() >>> profiles = [] From 60ecbd3f3e43ff3e83040c8c73ee9e4ff3322bd3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 9 Aug 2020 22:34:42 +0200 Subject: [PATCH 368/653] fix an import --- yt/loaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/loaders.py b/yt/loaders.py index e983d4cfe25..5315668520c 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -14,4 +14,4 @@ load_uniform_grid, load_unstructured_mesh, ) -from .utilities import load_sample +from .utilities.load_sample import load_sample From cc678522648902ae0c075fd6514cd0fd9a3339e5 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Aug 2020 14:10:57 +0200 Subject: [PATCH 369/653] Fixing abc --- yt/frontends/ramses/field_handlers.py | 4 ++-- yt/frontends/ramses/particle_handlers.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 7b9de6db697..0ae95b2ae39 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -23,7 +23,7 @@ def register_field_handler(ph): DETECTED_FIELDS = {} -class RegisteredRAMSESFieldFileHandler(abc.ABC): +class RegisteredRAMSESFieldFileHandler: """ This is a base class that on instantiation registers the file handler into the list. Used as a metaclass. @@ -42,7 +42,7 @@ def __init_subclass__(cls, *args, **kwargs): return cls -class HandlerMixin(abc.ABC): +class HandlerMixin: """This contains all the shared methods to handle RAMSES files. This is not supposed to be user-facing. diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index ec2225a8215..666fccbf238 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -19,7 +19,7 @@ def register_particle_handler(ph): PARTICLE_HANDLERS.add(ph) -class RegisteredRAMSESParticleFileHandler(abc.ABC): +class RegisteredRAMSESParticleFileHandler: """ This is a base class that on instantiation registers the file handler into the list. Used as a metaclass. From 65e6c38f5c1af3e641e6a55759871743147e86aa Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Aug 2020 14:23:54 +0200 Subject: [PATCH 370/653] Do not use super class --- yt/frontends/ramses/field_handlers.py | 33 ++++++++++-------------- yt/frontends/ramses/particle_handlers.py | 33 ++++++++++-------------- 2 files changed, 26 insertions(+), 40 deletions(-) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 0ae95b2ae39..ffe1357a6c0 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -23,25 +23,6 @@ def register_field_handler(ph): DETECTED_FIELDS = {} -class RegisteredRAMSESFieldFileHandler: - """ - This is a base class that on instantiation registers the file - handler into the list. Used as a metaclass. - """ - - def __init_subclass__(cls, *args, **kwargs): - """ - Registers subclasses at creation. 
- """ - super().__init_subclass__(*args, **kwargs) - - if cls.ftype is not None: - register_field_handler(cls) - - cls._unique_registry = {} - return cls - - class HandlerMixin: """This contains all the shared methods to handle RAMSES files. @@ -146,7 +127,7 @@ def any_exist(cls, ds): return exists -class FieldFileHandler(abc.ABC, HandlerMixin, RegisteredRAMSESFieldFileHandler): +class FieldFileHandler(abc.ABC, HandlerMixin): """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -173,6 +154,18 @@ class FieldFileHandler(abc.ABC, HandlerMixin, RegisteredRAMSESFieldFileHandler): None # Mapping from field to the type of the data (float, integer, ...) ) + def __init_subclass__(cls, *args, **kwargs): + """ + Registers subclasses at creation. + """ + super().__init_subclass__(*args, **kwargs) + + if cls.ftype is not None: + register_field_handler(cls) + + cls._unique_registry = {} + return cls + def __init__(self, domain): self.setup_handler(domain) diff --git a/yt/frontends/ramses/particle_handlers.py b/yt/frontends/ramses/particle_handlers.py index 666fccbf238..fa26a6f3beb 100644 --- a/yt/frontends/ramses/particle_handlers.py +++ b/yt/frontends/ramses/particle_handlers.py @@ -19,26 +19,7 @@ def register_particle_handler(ph): PARTICLE_HANDLERS.add(ph) -class RegisteredRAMSESParticleFileHandler: - """ - This is a base class that on instantiation registers the file - handler into the list. Used as a metaclass. - """ - - def __init_subclass__(cls, *args, **kwargs): - """ - Registers subclasses at creation. - """ - super().__init_subclass__(*args, **kwargs) - - if cls.ptype is not None: - register_particle_handler(cls) - - cls._unique_registry = {} - return cls - - -class ParticleFileHandler(abc.ABC, HandlerMixin, RegisteredRAMSESParticleFileHandler): +class ParticleFileHandler(abc.ABC, HandlerMixin): """ Abstract class to handle particles in RAMSES. Each instance represents a single file (one domain). @@ -66,6 +47,18 @@ class ParticleFileHandler(abc.ABC, HandlerMixin, RegisteredRAMSESParticleFileHan ) local_particle_count = None # The number of particle in the domain + def __init_subclass__(cls, *args, **kwargs): + """ + Registers subclasses at creation. 
+ """ + super().__init_subclass__(*args, **kwargs) + + if cls.ptype is not None: + register_particle_handler(cls) + + cls._unique_registry = {} + return cls + def __init__(self, domain): self.setup_handler(domain) From d126e057c408f450ea52019e7dd757290bfbdfae Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Aug 2020 16:02:48 +0200 Subject: [PATCH 371/653] Also show flynt version --- .travis.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 1b973ca6738..586332155ad 100644 --- a/.travis.yml +++ b/.travis.yml @@ -96,7 +96,9 @@ jobs: - stage: Lint name: "flynt" python: 3.6 - script: flynt --fail-on-change --dry-run yt/ + script: | + flynt -h | head -n 1 + flynt --fail-on-change --dry-run yt/ - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" From 2a06af545e1aa892d2d1188354bd9fb51136a128 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Fri, 24 Jul 2020 13:07:15 -0500 Subject: [PATCH 372/653] Avoid excessive testing in RotationTest * avoid saving the same image 4 times in different formats * rotate once to hit the code path, more is redundant --- .../volume_rendering/tests/test_scene.py | 19 ++++++++----------- 1 file changed, 8 insertions(+), 11 deletions(-) diff --git a/yt/visualization/volume_rendering/tests/test_scene.py b/yt/visualization/volume_rendering/tests/test_scene.py index efb2c2bed2c..14fad7ed38b 100644 --- a/yt/visualization/volume_rendering/tests/test_scene.py +++ b/yt/visualization/volume_rendering/tests/test_scene.py @@ -69,17 +69,14 @@ def test_rotation(self): mi_bound = ((ma - mi) * (0.10)) + mi ma_bound = ((ma - mi) * (0.90)) + mi tf.map_to_colormap(mi_bound, ma_bound, scale=0.01, colormap="Reds_r") - sc.render() - for suffix in ["png", "eps", "ps", "pdf"]: - fname = "test_scene.{}".format(suffix) - sc.save(fname, sigma_clip=6.0) - assert_fname(fname) - - nrot = 2 - for i in range(nrot): - sc.camera.pitch(2 * np.pi / nrot) - sc.render() - sc.save("test_rot_%04i.png" % i, sigma_clip=6.0) + fname = "test_scene.pdf" + sc.save(fname, sigma_clip=6.0) + assert_fname(fname) + + fname = "test_rot.png" + sc.camera.pitch(np.pi) + sc.save(fname, sigma_clip=6.0) + assert_fname(fname) def test_annotations(): From a678a6cf94f8a978f181acbd40d883c8680e6868 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Fri, 24 Jul 2020 13:08:46 -0500 Subject: [PATCH 373/653] Avoid excessive testing in test_rotation_volume_rendering * rotate once to hit the code path, more is redundant --- yt/visualization/volume_rendering/tests/test_varia.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index cc8b511fcee..74bc1adbdd1 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -63,11 +63,8 @@ def test_multiple_fields(self): def test_rotation_volume_rendering(self): im, sc = yt.volume_render(self.ds) - angle = 2 * np.pi - frames = 4 - for _ in range(frames): - sc.camera.yaw(angle / frames) - sc.render() + sc.camera.yaw(np.pi) + sc.render() def test_simple_volume_rendering(self): im, sc = yt.volume_render(self.ds, sigma_clip=4.0) From 5badf950c3cc836e8969a30782228dcdf2395ef7 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Fri, 24 Jul 2020 13:11:15 -0500 Subject: [PATCH 374/653] Remove redundant and excessive tests --- 
yt/visualization/volume_rendering/tests/test_sigma_clip.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/yt/visualization/volume_rendering/tests/test_sigma_clip.py b/yt/visualization/volume_rendering/tests/test_sigma_clip.py index a3ea34dcacb..e7ba536a5d7 100644 --- a/yt/visualization/volume_rendering/tests/test_sigma_clip.py +++ b/yt/visualization/volume_rendering/tests/test_sigma_clip.py @@ -35,8 +35,4 @@ def tearDown(self): def test_sigma_clip(self): ds = fake_random_ds(32) sc = yt.create_scene(ds) - im = sc.render() - sc.save("raw.png") sc.save("clip_2.png", sigma_clip=2) - sc.save("clip_4.png", sigma_clip=4.0) - return im, sc From 88e0692365f8f6e734f2c3e1c46fa877fca49db8 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Fri, 24 Jul 2020 13:13:57 -0500 Subject: [PATCH 375/653] Decrease the resolution of test images --- yt/visualization/volume_rendering/tests/test_lenses.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/visualization/volume_rendering/tests/test_lenses.py b/yt/visualization/volume_rendering/tests/test_lenses.py index 531eceda710..4b03d522cd9 100644 --- a/yt/visualization/volume_rendering/tests/test_lenses.py +++ b/yt/visualization/volume_rendering/tests/test_lenses.py @@ -51,7 +51,7 @@ def test_perspective_lens(self): def test_stereoperspective_lens(self): sc = Scene() cam = sc.add_camera(self.ds, lens_type="stereo-perspective") - cam.resolution = [1024, 512] + cam.resolution = [256, 128] cam.position = self.ds.arr(np.array([0.7, 0.7, 0.7]), "code_length") vol = VolumeSource(self.ds, field=self.field) tf = vol.transfer_function @@ -88,7 +88,7 @@ def test_plane_lens(self): def test_spherical_lens(self): sc = Scene() cam = sc.add_camera(self.ds, lens_type="spherical") - cam.resolution = [512, 256] + cam.resolution = [256, 128] cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), "code_length") vol = VolumeSource(self.ds, field=self.field) tf = vol.transfer_function @@ -101,7 +101,7 @@ def test_stereospherical_lens(self): w = self.ds.arr(w, "code_length") sc = Scene() cam = sc.add_camera(self.ds, lens_type="stereo-spherical") - cam.resolution = [512, 512] + cam.resolution = [256, 256] cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), "code_length") vol = VolumeSource(self.ds, field=self.field) tf = vol.transfer_function From c3b9f92bdbb755a05aee4cc4f99b7d564021cad2 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Aug 2020 18:35:45 +0200 Subject: [PATCH 376/653] Bump flynt requirements --- .pre-commit-config.yaml | 9 +++------ tests/lint_requirements.txt | 2 +- 2 files changed, 4 insertions(+), 7 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index d98662753c7..0e8cbc01f65 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,10 +11,7 @@ rev: '3.8.1' # keep in sync with tests/lint_requirements.txt hooks: - id: flake8 -- repo: local +- repo: https://github.com/ikamensh/flynt/ + rev: '0.51' hooks: - - id: flynt - name: flynt - entry: flynt - language: system - types: [python] + - id: flynt \ No newline at end of file diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index 6c07fac4c1a..5025bebf56b 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -5,4 +5,4 @@ pyflakes==2.2.0 isort==5.2.1 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear -flynt==0.50 +flynt==0.51 # keep in sync with .pre-commit-config.yaml From f3709604fd4e1fbad353c0217a16b794b9523c22 Mon Sep 17 00:00:00 2001 From: Corentin 
Cadiou Date: Tue, 11 Aug 2020 18:39:40 +0200 Subject: [PATCH 377/653] Add comment to keep things in sync --- .pre-commit-config.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 0e8cbc01f65..7cb01199e73 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -11,7 +11,7 @@ rev: '3.8.1' # keep in sync with tests/lint_requirements.txt hooks: - id: flake8 -- repo: https://github.com/ikamensh/flynt/ - rev: '0.51' +- repo: https://github.com/ikamensh/flynt + rev: '0.51' # keep in sync with tests/lint_requirements.txt hooks: - id: flynt \ No newline at end of file From b288041b1559fd6c45a469a928ea8cd474233e27 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Aug 2020 19:19:49 +0200 Subject: [PATCH 378/653] Update contributing Update black call with correct syntax Add isort to the doc Add flynt as automated formatter --- CONTRIBUTING.rst | 7 ++++--- 1 file changed, 4 insertions(+), 3 deletions(-) diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index fcec4870164..86e091563ba 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -723,8 +723,8 @@ Below are a list of rules for coding style in yt. Some of these rules are suggestions are not explicitly enforced, while some are enforced via automated testing. -The yt project uses ``flake8`` and ``flynt`` to report on code correctness (syntax + -anti-pattern detection), and ``black`` for automated formatting. +The yt project uses ``flake8`` to report on code correctness (syntax + +anti-pattern detection), and ``isort``, ``black`` and ``flynt`` for automated formatting. To check the coding style of your contributions locally you will need to install those tools, which can be done for instance with ``pip``: @@ -738,8 +738,9 @@ Then run the checks from the top level of the repository with .. code-block:: bash $ flake8 yt/ + $ black --check yt/ + $ isort --check yt/ $ flynt --fail-on-change --dry-run yt/ - $ black --check These will respectively print out any ``flake8`` errors or warnings that your newly added code triggers, and a list of files that are currenlty not compliant with ``black``. Note From 87174d15c02123f888327875453d1502baa52dbc Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Fri, 8 May 2020 14:21:22 -0500 Subject: [PATCH 379/653] Avoid importing 'yt' deep in the code base --- conftest.py | 4 +- yt/__init__.py | 2 - yt/config.py | 1 + yt/testing.py | 84 +++++++++++++++++++--------------------- yt/tests/test_testing.py | 4 +- 5 files changed, 44 insertions(+), 51 deletions(-) diff --git a/conftest.py b/conftest.py index eeb5a72c7c3..aad507f4729 100644 --- a/conftest.py +++ b/conftest.py @@ -14,7 +14,6 @@ import pytest import yaml -import yt from yt.config import ytcfg from yt.utilities.answer_testing import utils @@ -49,7 +48,8 @@ def pytest_configure(config): Reads in the tests/tests.yaml file. This file contains a list of each answer test's answer file (including the changeset number). """ - yt._called_from_pytest = True + + ytcfg["yt", "__withinpytest"] = "True" # Make sure that the answers dir exists. 
If not, try to make it if not os.path.isdir(answer_dir): os.mkdir(answer_dir) diff --git a/yt/__init__.py b/yt/__init__.py index f10da40a5fa..f0adf4ebfb5 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -175,8 +175,6 @@ from yt.units.unit_systems import UnitSystem, unit_system_registry -_called_from_pytest = False - def _check_deprecated_parameters(): from yt.config import ytcfg diff --git a/yt/config.py b/yt/config.py index e2a2244fd1d..284c5b6323e 100644 --- a/yt/config.py +++ b/yt/config.py @@ -14,6 +14,7 @@ inline="False", numthreads="-1", __withintesting="False", + __withinpytest="False", __parallel="False", __global_parallel_rank="0", __global_parallel_size="1", diff --git a/yt/testing.py b/yt/testing.py index 97cf8656995..8edbd98fd8f 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -13,7 +13,6 @@ from numpy.random import RandomState from unyt.exceptions import UnitOperationError -import yt from yt.config import ytcfg from yt.convenience import load from yt.funcs import iterable @@ -877,61 +876,56 @@ def units_override_check(fn): # This is an export of the 40 grids in IsolatedGalaxy that are of level 4 or # lower. It's just designed to give a sample AMR index to deal with. _amr_grid_index = [ - [0, [0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [32, 32, 32],], - [1, [0.25, 0.21875, 0.25], [0.5, 0.5, 0.5], [16, 18, 16],], - [1, [0.5, 0.21875, 0.25], [0.75, 0.5, 0.5], [16, 18, 16],], - [1, [0.21875, 0.5, 0.25], [0.5, 0.75, 0.5], [18, 16, 16],], - [1, [0.5, 0.5, 0.25], [0.75, 0.75, 0.5], [16, 16, 16],], - [1, [0.25, 0.25, 0.5], [0.5, 0.5, 0.75], [16, 16, 16],], - [1, [0.5, 0.25, 0.5], [0.75, 0.5, 0.75], [16, 16, 16],], - [1, [0.25, 0.5, 0.5], [0.5, 0.75, 0.75], [16, 16, 16],], - [1, [0.5, 0.5, 0.5], [0.75, 0.75, 0.75], [16, 16, 16],], - [2, [0.5, 0.5, 0.5], [0.71875, 0.71875, 0.71875], [28, 28, 28],], - [3, [0.5, 0.5, 0.5], [0.6640625, 0.65625, 0.6796875], [42, 40, 46],], - [4, [0.5, 0.5, 0.5], [0.59765625, 0.6015625, 0.6015625], [50, 52, 52],], - [2, [0.28125, 0.5, 0.5], [0.5, 0.734375, 0.71875], [28, 30, 28],], - [3, [0.3359375, 0.5, 0.5], [0.5, 0.671875, 0.6640625], [42, 44, 42],], - [4, [0.40625, 0.5, 0.5], [0.5, 0.59765625, 0.59765625], [48, 50, 50],], - [2, [0.5, 0.28125, 0.5], [0.71875, 0.5, 0.71875], [28, 28, 28],], - [3, [0.5, 0.3359375, 0.5], [0.671875, 0.5, 0.6640625], [44, 42, 42],], - [4, [0.5, 0.40625, 0.5], [0.6015625, 0.5, 0.59765625], [52, 48, 50],], - [2, [0.28125, 0.28125, 0.5], [0.5, 0.5, 0.71875], [28, 28, 28],], - [3, [0.3359375, 0.3359375, 0.5], [0.5, 0.5, 0.671875], [42, 42, 44],], + [0, [0.0, 0.0, 0.0], [1.0, 1.0, 1.0], [32, 32, 32]], + [1, [0.25, 0.21875, 0.25], [0.5, 0.5, 0.5], [16, 18, 16]], + [1, [0.5, 0.21875, 0.25], [0.75, 0.5, 0.5], [16, 18, 16]], + [1, [0.21875, 0.5, 0.25], [0.5, 0.75, 0.5], [18, 16, 16]], + [1, [0.5, 0.5, 0.25], [0.75, 0.75, 0.5], [16, 16, 16]], + [1, [0.25, 0.25, 0.5], [0.5, 0.5, 0.75], [16, 16, 16]], + [1, [0.5, 0.25, 0.5], [0.75, 0.5, 0.75], [16, 16, 16]], + [1, [0.25, 0.5, 0.5], [0.5, 0.75, 0.75], [16, 16, 16]], + [1, [0.5, 0.5, 0.5], [0.75, 0.75, 0.75], [16, 16, 16]], + [2, [0.5, 0.5, 0.5], [0.71875, 0.71875, 0.71875], [28, 28, 28]], + [3, [0.5, 0.5, 0.5], [0.6640625, 0.65625, 0.6796875], [42, 40, 46]], + [4, [0.5, 0.5, 0.5], [0.59765625, 0.6015625, 0.6015625], [50, 52, 52]], + [2, [0.28125, 0.5, 0.5], [0.5, 0.734375, 0.71875], [28, 30, 28]], + [3, [0.3359375, 0.5, 0.5], [0.5, 0.671875, 0.6640625], [42, 44, 42]], + [4, [0.40625, 0.5, 0.5], [0.5, 0.59765625, 0.59765625], [48, 50, 50]], + [2, [0.5, 0.28125, 0.5], [0.71875, 0.5, 0.71875], [28, 
28, 28]], + [3, [0.5, 0.3359375, 0.5], [0.671875, 0.5, 0.6640625], [44, 42, 42]], + [4, [0.5, 0.40625, 0.5], [0.6015625, 0.5, 0.59765625], [52, 48, 50]], + [2, [0.28125, 0.28125, 0.5], [0.5, 0.5, 0.71875], [28, 28, 28]], + [3, [0.3359375, 0.3359375, 0.5], [0.5, 0.5, 0.671875], [42, 42, 44]], [ 4, [0.46484375, 0.37890625, 0.50390625], [0.4765625, 0.390625, 0.515625], [6, 6, 6], ], - [4, [0.40625, 0.40625, 0.5], [0.5, 0.5, 0.59765625], [48, 48, 50],], - [2, [0.5, 0.5, 0.28125], [0.71875, 0.71875, 0.5], [28, 28, 28],], - [3, [0.5, 0.5, 0.3359375], [0.6796875, 0.6953125, 0.5], [46, 50, 42],], - [4, [0.5, 0.5, 0.40234375], [0.59375, 0.6015625, 0.5], [48, 52, 50],], - [2, [0.265625, 0.5, 0.28125], [0.5, 0.71875, 0.5], [30, 28, 28],], - [3, [0.3359375, 0.5, 0.328125], [0.5, 0.65625, 0.5], [42, 40, 44],], - [4, [0.40234375, 0.5, 0.40625], [0.5, 0.60546875, 0.5], [50, 54, 48],], - [2, [0.5, 0.265625, 0.28125], [0.71875, 0.5, 0.5], [28, 30, 28],], - [3, [0.5, 0.3203125, 0.328125], [0.6640625, 0.5, 0.5], [42, 46, 44],], - [4, [0.5, 0.3984375, 0.40625], [0.546875, 0.5, 0.5], [24, 52, 48],], - [4, [0.546875, 0.41796875, 0.4453125], [0.5625, 0.4375, 0.5], [8, 10, 28],], - [ - 4, - [0.546875, 0.453125, 0.41796875], - [0.5546875, 0.48046875, 0.4375], - [4, 14, 10], - ], - [4, [0.546875, 0.4375, 0.4375], [0.609375, 0.5, 0.5], [32, 32, 32],], - [4, [0.546875, 0.4921875, 0.41796875], [0.56640625, 0.5, 0.4375], [10, 4, 10],], + [4, [0.40625, 0.40625, 0.5], [0.5, 0.5, 0.59765625], [48, 48, 50]], + [2, [0.5, 0.5, 0.28125], [0.71875, 0.71875, 0.5], [28, 28, 28]], + [3, [0.5, 0.5, 0.3359375], [0.6796875, 0.6953125, 0.5], [46, 50, 42]], + [4, [0.5, 0.5, 0.40234375], [0.59375, 0.6015625, 0.5], [48, 52, 50]], + [2, [0.265625, 0.5, 0.28125], [0.5, 0.71875, 0.5], [30, 28, 28]], + [3, [0.3359375, 0.5, 0.328125], [0.5, 0.65625, 0.5], [42, 40, 44]], + [4, [0.40234375, 0.5, 0.40625], [0.5, 0.60546875, 0.5], [50, 54, 48]], + [2, [0.5, 0.265625, 0.28125], [0.71875, 0.5, 0.5], [28, 30, 28]], + [3, [0.5, 0.3203125, 0.328125], [0.6640625, 0.5, 0.5], [42, 46, 44]], + [4, [0.5, 0.3984375, 0.40625], [0.546875, 0.5, 0.5], [24, 52, 48]], + [4, [0.546875, 0.41796875, 0.4453125], [0.5625, 0.4375, 0.5], [8, 10, 28]], + [4, [0.546875, 0.453125, 0.41796875], [0.5546875, 0.48046875, 0.4375], [4, 14, 10]], + [4, [0.546875, 0.4375, 0.4375], [0.609375, 0.5, 0.5], [32, 32, 32]], + [4, [0.546875, 0.4921875, 0.41796875], [0.56640625, 0.5, 0.4375], [10, 4, 10]], [ 4, [0.546875, 0.48046875, 0.41796875], [0.5703125, 0.4921875, 0.4375], [12, 6, 10], ], - [4, [0.55859375, 0.46875, 0.43359375], [0.5703125, 0.48046875, 0.4375], [6, 6, 2],], - [2, [0.265625, 0.28125, 0.28125], [0.5, 0.5, 0.5], [30, 28, 28],], - [3, [0.328125, 0.3359375, 0.328125], [0.5, 0.5, 0.5], [44, 42, 44],], - [4, [0.4140625, 0.40625, 0.40625], [0.5, 0.5, 0.5], [44, 48, 48],], + [4, [0.55859375, 0.46875, 0.43359375], [0.5703125, 0.48046875, 0.4375], [6, 6, 2]], + [2, [0.265625, 0.28125, 0.28125], [0.5, 0.5, 0.5], [30, 28, 28]], + [3, [0.328125, 0.3359375, 0.328125], [0.5, 0.5, 0.5], [44, 42, 44]], + [4, [0.4140625, 0.40625, 0.40625], [0.5, 0.5, 0.5], [44, 48, 48]], ] @@ -1236,7 +1230,7 @@ def skip(*args, **kwargs): print(msg) pytest.skip(msg) - if yt._called_from_pytest: + if ytcfg.getboolean("yt", "__withinpytest"): return skip else: return lambda: None diff --git a/yt/tests/test_testing.py b/yt/tests/test_testing.py index cc26240d0e6..5e739bfddb3 100644 --- a/yt/tests/test_testing.py +++ b/yt/tests/test_testing.py @@ -2,7 +2,7 @@ import numpy as np import pytest -import yt 
+from yt.config import ytcfg from yt.testing import assert_equal, requires_backend @@ -19,7 +19,7 @@ def plot_b(): return True assert_equal(plot_b(), True) - if not yt._called_from_pytest: + if not ytcfg.getboolean("yt", "__withinpytest"): assert_equal(plot_a(), None) else: # NOTE: This doesn't actually work. pytest.skip() doesn't actually From 97aec85dedd88683cd67ef0d2e96d61cc70305d0 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Tue, 11 Aug 2020 13:10:19 -0500 Subject: [PATCH 380/653] Add a check for reduced dimensionality slices --- .../stream/tests/test_stream_unstructured.py | 3 +++ yt/geometry/selection_routines.pyx | 17 +++++++++++++++++ 2 files changed, 20 insertions(+) diff --git a/yt/frontends/stream/tests/test_stream_unstructured.py b/yt/frontends/stream/tests/test_stream_unstructured.py index e35c957cb40..365f13b0272 100644 --- a/yt/frontends/stream/tests/test_stream_unstructured.py +++ b/yt/frontends/stream/tests/test_stream_unstructured.py @@ -22,8 +22,11 @@ def test_multi_mesh(): ds = load_unstructured_mesh(connectList, coordsMulti, dataList) sl = SlicePlot(ds, "z", ("connect1", "test")) + assert sl.data_source.field_data["connect1", "test"].shape == (1, 3) sl = SlicePlot(ds, "z", ("connect2", "test")) + assert sl.data_source.field_data["connect2", "test"].shape == (1, 3) sl = SlicePlot(ds, "z", ("all", "test")) + assert sl.data_source.field_data["all", "test"].shape == (2, 3) sl.annotate_mesh_lines() diff --git a/yt/geometry/selection_routines.pyx b/yt/geometry/selection_routines.pyx index 8821e454d66..50c098fccd8 100644 --- a/yt/geometry/selection_routines.pyx +++ b/yt/geometry/selection_routines.pyx @@ -1473,10 +1473,19 @@ cdef class SliceSelector(SelectorObject): cdef int axis cdef np.float64_t coord cdef int ax, ay + cdef int reduced_dimensionality def __init__(self, dobj): self.axis = dobj.axis self.coord = _ensure_code(dobj.coord) + # If we have a reduced dimensionality dataset, we want to avoid any + # checks against it in the axes that are beyond its dimensionality. + # This means that if we have a 2D dataset, *all* slices along z will + # select all the zones. 
+ if self.axis >= dobj.ds.dimensionality: + self.reduced_dimensionality = 1 + else: + self.reduced_dimensionality = 0 self.ax = (self.axis+1) % 3 self.ay = (self.axis+2) % 3 @@ -1529,6 +1538,8 @@ cdef class SliceSelector(SelectorObject): @cython.wraparound(False) @cython.cdivision(True) cdef int select_cell(self, np.float64_t pos[3], np.float64_t dds[3]) nogil: + if self.reduced_dimensionality == 1: + return 1 if pos[self.axis] + 0.5*dds[self.axis] > self.coord \ and pos[self.axis] - 0.5*dds[self.axis] - grid_eps <= self.coord: return 1 @@ -1542,6 +1553,8 @@ cdef class SliceSelector(SelectorObject): @cython.wraparound(False) @cython.cdivision(True) cdef int select_sphere(self, np.float64_t pos[3], np.float64_t radius) nogil: + if self.reduced_dimensionality == 1: + return 1 cdef np.float64_t dist = self.periodic_difference( pos[self.axis], self.coord, self.axis) if dist*dist < radius*radius: @@ -1553,6 +1566,8 @@ cdef class SliceSelector(SelectorObject): @cython.cdivision(True) cdef int select_bbox(self, np.float64_t left_edge[3], np.float64_t right_edge[3]) nogil: + if self.reduced_dimensionality == 1: + return 1 if left_edge[self.axis] - grid_eps <= self.coord < right_edge[self.axis]: return 1 return 0 @@ -1562,6 +1577,8 @@ cdef class SliceSelector(SelectorObject): @cython.cdivision(True) cdef int select_bbox_edge(self, np.float64_t left_edge[3], np.float64_t right_edge[3]) nogil: + if self.reduced_dimensionality == 1: + return 2 if left_edge[self.axis] - grid_eps <= self.coord < right_edge[self.axis]: return 2 # a box with non-zero volume can't be inside a plane return 0 From b4a2bcd477cbd373f046dab4ed2efff2ef2bab65 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Aug 2020 20:23:52 +0200 Subject: [PATCH 381/653] Make sure to ignore extern --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- .travis.yml | 2 +- CONTRIBUTING.rst | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 143f27036e3..8a3b71a47d6 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -23,7 +23,7 @@ detail. Why is this change required? What problem does it solve?--> - [ ] pass `black --check yt/` - [ ] pass `isort . --check --diff` - [ ] pass `flake8 yt/` -- [ ] pass `flynt --fail-on-change --dry-run yt/` +- [ ] pass `flynt --fail-on-change --dry-run -e yt/extern yt/` - [ ] New features are documented, with docstrings and narrative docs - [ ] Adds a test for any bugs fixed. Adds tests for new features. diff --git a/.travis.yml b/.travis.yml index 586332155ad..14db693072a 100644 --- a/.travis.yml +++ b/.travis.yml @@ -98,7 +98,7 @@ jobs: python: 3.6 script: | flynt -h | head -n 1 - flynt --fail-on-change --dry-run yt/ + flynt --fail-on-change --dry-run -e yt/extern yt/ - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 86e091563ba..6a7e2339bc2 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -740,7 +740,7 @@ Then run the checks from the top level of the repository with $ flake8 yt/ $ black --check yt/ $ isort --check yt/ - $ flynt --fail-on-change --dry-run yt/ + $ flynt --fail-on-change --dry-run -e yt/extern yt/ These will respectively print out any ``flake8`` errors or warnings that your newly added code triggers, and a list of files that are currenlty not compliant with ``black``. 
Note From adadaf8ade539a2b6d4f07fce87c96437281c1d0 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 11 Aug 2020 21:20:02 +0100 Subject: [PATCH 382/653] Check for current_redshift before setting things. --- yt/data_objects/static_output.py | 73 +++++++++++++++++--------------- 1 file changed, 39 insertions(+), 34 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 0bfea3e171e..d6d03426e30 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -39,7 +39,7 @@ SphericalCoordinateHandler, ) from yt.units import UnitContainer, _wrap_display_ytarray -from yt.units.dimensions import current_mks +from yt.units.dimensions import current_mks, length from yt.units.unit_object import Unit, define_unit from yt.units.unit_registry import UnitRegistry from yt.units.unit_systems import create_code_unit_system, unit_system_registry @@ -1110,47 +1110,52 @@ def set_units(self): Creates the unit registry for this dataset. """ - from yt.units.dimensions import length if getattr(self, "cosmological_simulation", False): # this dataset is cosmological, so add cosmological units. self.unit_registry.modify("h", self.hubble_constant) - # Comoving lengths - for my_unit in ["m", "pc", "AU", "au"]: - new_unit = "%scm" % my_unit - my_u = Unit(my_unit, registry=self.unit_registry) - self.unit_registry.add( - new_unit, - my_u.base_value / (1 + self.current_redshift), - length, - "\\rm{%s}/(1+z)" % my_unit, - prefixable=True, - ) - self.unit_registry.modify("a", 1 / (1 + self.current_redshift)) + if getattr(self, "current_redshift", None): + # Comoving lengths + for my_unit in ["m", "pc", "AU", "au"]: + new_unit = "%scm" % my_unit + my_u = Unit(my_unit, registry=self.unit_registry) + self.unit_registry.add( + new_unit, + my_u.base_value / (1 + self.current_redshift), + length, + "\\rm{%s}/(1+z)" % my_unit, + prefixable=True, + ) + self.unit_registry.modify("a", 1 / (1 + self.current_redshift)) self.set_code_units() def setup_cosmology(self): - if getattr(self, "cosmological_simulation", False): - # this dataset is cosmological, add a cosmology object - - # Set dynamical dark energy parameters - use_dark_factor = getattr(self, "use_dark_factor", False) - w_0 = getattr(self, "w_0", -1.0) - w_a = getattr(self, "w_a", 0.0) - - # many frontends do not set this - setdefaultattr(self, "omega_radiation", 0.0) - - self.cosmology = Cosmology( - hubble_constant=self.hubble_constant, - omega_matter=self.omega_matter, - omega_lambda=self.omega_lambda, - omega_radiation=self.omega_radiation, - use_dark_factor=use_dark_factor, - w_0=w_0, - w_a=w_a, - ) + """ + If this dataset is cosmological, add a cosmology object. 
+ """ + if not getattr(self, "cosmological_simulation", False): + return + + # Set dynamical dark energy parameters + use_dark_factor = getattr(self, "use_dark_factor", False) + w_0 = getattr(self, "w_0", -1.0) + w_a = getattr(self, "w_a", 0.0) + + # many frontends do not set this + setdefaultattr(self, "omega_radiation", 0.0) + + self.cosmology = Cosmology( + hubble_constant=self.hubble_constant, + omega_matter=self.omega_matter, + omega_lambda=self.omega_lambda, + omega_radiation=self.omega_radiation, + use_dark_factor=use_dark_factor, + w_0=w_0, + w_a=w_a, + ) + + if getattr(self, "current_redshift", None): self.critical_density = self.cosmology.critical_density( self.current_redshift ) From 7e384964f550cd93134f76c65dc91f9fbc490512 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 07:14:25 +0200 Subject: [PATCH 383/653] EOF line --- .pre-commit-config.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 7cb01199e73..62efb8ce42d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -14,4 +14,4 @@ - repo: https://github.com/ikamensh/flynt rev: '0.51' # keep in sync with tests/lint_requirements.txt hooks: - - id: flynt \ No newline at end of file + - id: flynt From d808b49ac5ab836e3ea3d00ef2fc77ac722707ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 07:15:55 +0200 Subject: [PATCH 384/653] add an inline comment --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index fea0f5b748d..452f4dce32f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -103,7 +103,7 @@ jobs: name: "flynt" python: 3.6 script: | - flynt -h | head -n 1 + flynt -h | head -n 1 # this is a workaround to print only the version number flynt --fail-on-change --dry-run -e yt/extern yt/ - stage: tests From e7068b0c02ad77929040b9cee7aa17abee773258 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 07:34:44 +0200 Subject: [PATCH 385/653] fix a mistake in flynt commands --- .github/PULL_REQUEST_TEMPLATE.md | 2 +- .travis.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md index 8a3b71a47d6..2a03264ff7a 100644 --- a/.github/PULL_REQUEST_TEMPLATE.md +++ b/.github/PULL_REQUEST_TEMPLATE.md @@ -23,7 +23,7 @@ detail. Why is this change required? What problem does it solve?--> - [ ] pass `black --check yt/` - [ ] pass `isort . --check --diff` - [ ] pass `flake8 yt/` -- [ ] pass `flynt --fail-on-change --dry-run -e yt/extern yt/` +- [ ] pass `flynt yt/ --fail-on-change --dry-run -e yt/extern` - [ ] New features are documented, with docstrings and narrative docs - [ ] Adds a test for any bugs fixed. Adds tests for new features. 
diff --git a/.travis.yml b/.travis.yml index 452f4dce32f..9b55523c2df 100644 --- a/.travis.yml +++ b/.travis.yml @@ -104,7 +104,7 @@ jobs: python: 3.6 script: | flynt -h | head -n 1 # this is a workaround to print only the version number - flynt --fail-on-change --dry-run -e yt/extern yt/ + flynt yt/ --fail-on-change --dry-run -e yt/extern - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" From 18ff333b89447d6286c3d65023e40d0eaa7e522b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 30 Jul 2020 09:28:12 +0200 Subject: [PATCH 386/653] fix bugbear B005 errors --- setup.cfg | 1 - yt/frontends/fits/data_structures.py | 4 ++-- yt/utilities/sdf.py | 2 +- 3 files changed, 3 insertions(+), 4 deletions(-) diff --git a/setup.cfg b/setup.cfg index ac482fd533c..6f833c2888c 100644 --- a/setup.cfg +++ b/setup.cfg @@ -36,7 +36,6 @@ ignore = E203, # Whitespace before ':' (black compatibility) E741, # Do not use variables named 'I', 'O', or 'l' W503, # Line break occurred before a binary operator (black compatibility) W605, # Invalid escape sequence 'x' - B005, # "Using .strip() with multi-character strings is misleading the reader." B302, # this is a python 3 compatibility warning, not relevant since don't support python 2 anymore jobs=8 diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index d95ae89b909..b48a48803f5 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -880,7 +880,7 @@ def _determine_wcs(self): for k, v in self.primary_header.items(): if k.startswith("TTYP"): if v.lower() in ["x", "y"]: - num = k.strip("TTYPE") + num = k.replace("TTYPE", "") self.events_info[v.lower()] = ( self.primary_header["TLMIN" + num], self.primary_header["TLMAX" + num], @@ -890,7 +890,7 @@ def _determine_wcs(self): self.primary_header["TCRPX" + num], ) elif v.lower() in ["energy", "time"]: - num = k.strip("TTYPE") + num = k.replace("TTYPE", "") unit = self.primary_header["TUNIT" + num].lower() if unit.endswith("ev"): unit = unit.replace("ev", "eV") diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index 8df3933a4bc..e9364173d05 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -414,7 +414,7 @@ def parse_struct(self, line, ascfile): str_types.append((v, vtype)) l = ascfile.readline() num = l.strip("}[]") - num = num.strip("\;\\\n]") + num = num.strip("\;\\\n]") # NOQA B005 if len(num) == 0: # We need to compute the number of records. The DataStruct will # handle this. 
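The ``.strip()`` to ``.replace()`` changes in the patch above deserve a second
look, because the two calls only agree by accident: ``str.strip`` removes a set
of characters from both ends, not a prefix string. A quick illustration (the
last key is made up):

key = "TTYPE1"
print(key.replace("TTYPE", ""))      # "1": drops the literal prefix, as intended
print(key.strip("TTYPE"))            # "1": agrees only because "1" is not one of T, Y, P, E
print("TYPE_EXTENT".strip("TTYPE"))  # "_EXTEN": strip() eats characters, not prefixes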
From ab540c419f8d4b3d4fe119803a6eca9473fa2055 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 30 Jul 2020 12:06:38 +0200 Subject: [PATCH 387/653] fix too-long lines (strings and comments) --- setup.cfg | 1 - yt/convenience.py | 3 +- yt/data_objects/data_containers.py | 10 +- yt/data_objects/image_array.py | 24 ++- yt/data_objects/level_sets/clump_tools.py | 6 +- yt/data_objects/selection_data_containers.py | 3 +- yt/data_objects/static_output.py | 11 +- yt/data_objects/time_series.py | 4 +- yt/fields/fluid_fields.py | 3 +- yt/fields/interpolated_fields.py | 3 +- yt/frontends/_skeleton/data_structures.py | 4 +- yt/frontends/_skeleton/io.py | 5 +- yt/frontends/adaptahop/data_structures.py | 9 +- yt/frontends/adaptahop/io.py | 2 +- yt/frontends/amrvac/data_structures.py | 64 ++++--- yt/frontends/amrvac/datfile_utils.py | 4 +- yt/frontends/amrvac/fields.py | 89 +++++----- yt/frontends/amrvac/io.py | 12 +- yt/frontends/amrvac/tests/test_outputs.py | 4 +- .../amrvac/tests/test_read_amrvac_namelist.py | 6 +- yt/frontends/athena/data_structures.py | 5 +- yt/frontends/boxlib/data_structures.py | 29 +-- yt/frontends/enzo/simulation_handling.py | 3 +- yt/frontends/gadget/data_structures.py | 5 +- yt/frontends/gamer/data_structures.py | 17 +- yt/frontends/open_pmd/data_structures.py | 29 +-- yt/frontends/open_pmd/fields.py | 15 +- yt/frontends/open_pmd/io.py | 13 +- yt/frontends/open_pmd/misc.py | 15 +- yt/frontends/ramses/data_structures.py | 15 +- yt/frontends/ramses/field_handlers.py | 10 +- yt/frontends/ramses/fields.py | 7 +- yt/frontends/sdf/tests/test_outputs.py | 3 +- yt/frontends/stream/io.py | 3 - yt/frontends/tipsy/io.py | 4 +- yt/geometry/grid_geometry_handler.py | 3 +- yt/tests/test_load_errors.py | 7 +- yt/utilities/command_line.py | 9 +- yt/utilities/cosmology.py | 5 +- yt/utilities/exceptions.py | 4 +- yt/utilities/linear_interpolators.py | 3 +- yt/utilities/math_utils.py | 13 +- yt/utilities/orientation.py | 5 +- yt/utilities/particle_generator.py | 17 +- yt/utilities/sdf.py | 8 +- yt/visualization/fits_image.py | 2 +- yt/visualization/image_writer.py | 6 +- yt/visualization/line_plot.py | 14 +- yt/visualization/plot_container.py | 13 +- yt/visualization/plot_modifications.py | 27 ++- yt/visualization/plot_window.py | 165 +++++++++--------- .../volume_rendering/off_axis_projection.py | 3 +- .../volume_rendering/render_source.py | 35 ++-- yt/visualization/volume_rendering/scene.py | 5 +- .../volume_rendering/shader_objects.py | 4 +- .../tests/test_off_axis_SPH.py | 17 +- .../volume_rendering/transfer_functions.py | 3 +- 57 files changed, 449 insertions(+), 359 deletions(-) diff --git a/setup.cfg b/setup.cfg index 6f833c2888c..86f0b95fe09 100644 --- a/setup.cfg +++ b/setup.cfg @@ -32,7 +32,6 @@ ignore = E203, # Whitespace before ':' (black compatibility) E266, # Too many leading '#' for block comment E302, # Expected 2 blank lines, found 0 E306, # Expected 1 blank line before a nested definition - E501, # Line too long (black compatibility) E741, # Do not use variables named 'I', 'O', or 'l' W503, # Line break occurred before a binary operator (black compatibility) W605, # Invalid escape sequence 'x' diff --git a/yt/convenience.py b/yt/convenience.py index 13902ac84dc..c7d89cddb63 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -56,7 +56,8 @@ def load(fn, *args, **kwargs): return DatasetSeries(fn, *args, **kwargs) - # Unless the dataset starts with http, look for it using the path or relative to the data dir (in this order). 
+ # Unless the dataset starts with http, + # look for it using the path or relative to the data dir (in this order). if not (os.path.exists(fn) or fn.startswith("http")): data_dir = ytcfg.get("yt", "test_data_dir") alt_fn = os.path.join(data_dir, fn) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index fc04021b06e..fea2377141c 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -99,15 +99,17 @@ def __init__(self, ds, field_parameters): sets its initial set of fields, and the remainder of the arguments are passed as field_parameters. """ - # ds is typically set in the new object type created in Dataset._add_object_class - # but it can also be passed as a parameter to the constructor, in which case it will - # override the default. This code ensures it is never not set. + # ds is typically set in the new object type created in + # Dataset._add_object_class but it can also be passed as a parameter to the + # constructor, in which case it will override the default. + # This code ensures it is never not set. if ds is not None: self.ds = ds else: if not hasattr(self, "ds"): raise RuntimeError( - "Error: ds must be set either through class type or parameter to the constructor" + "Error: ds must be set either through class type " + "or parameter to the constructor" ) self._current_particle_type = "all" diff --git a/yt/data_objects/image_array.py b/yt/data_objects/image_array.py index 8426c02774e..a325df09b28 100644 --- a/yt/data_objects/image_array.py +++ b/yt/data_objects/image_array.py @@ -107,9 +107,15 @@ def write_hdf5(self, filename, dataset_name=None): ... for k in range(im.shape[2]): ... im[i,:,k] = np.linspace(0.,0.3*k, im.shape[1]) - >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), - ... 'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]), - ... 'width':0.245, 'units':'cm', 'type':'rendering'} + >>> myinfo = { + ... 'field':'dinosaurs', + ... 'east_vector':np.array([1.,0.,0.]), + ... 'north_vector':np.array([0.,0.,1.]), + ... 'normal_vector':np.array([0.,1.,0.]), + ... 'width':0.245, + ... 'units':'cm', + ... 'type':'rendering' + ... } >>> im_arr = ImageArray(im, info=myinfo) >>> im_arr.write_hdf5('test_ImageArray.h5') @@ -361,9 +367,15 @@ def write_image( >>> for i in range(im.shape[0]): ... im[i,:] = np.linspace(0.,0.3*i, im.shape[1]) - >>> myinfo = {'field':'dinosaurs', 'east_vector':np.array([1.,0.,0.]), - ... 'north_vector':np.array([0.,0.,1.]), 'normal_vector':np.array([0.,1.,0.]), - ... 'width':0.245, 'units':'cm', 'type':'rendering'} + >>> myinfo = { + ... 'field':'dinosaurs', + ... 'east_vector':np.array([1.,0.,0.]), + ... 'north_vector':np.array([0.,0.,1.]), + ... 'normal_vector':np.array([0.,1.,0.]), + ... 'width':0.245, + ... 'units':'cm', + ... 'type':'rendering' + ... } >>> im_arr = ImageArray(im, info=myinfo) >>> im_arr.write_image('test_ImageArray.png') diff --git a/yt/data_objects/level_sets/clump_tools.py b/yt/data_objects/level_sets/clump_tools.py index 36b6ca628e7..d1374dc3963 100644 --- a/yt/data_objects/level_sets/clump_tools.py +++ b/yt/data_objects/level_sets/clump_tools.py @@ -40,8 +40,10 @@ def return_all_clumps(clump): def return_bottom_clumps(clump, dbg=0): - """Recursively return clumps at the bottom of the index. - This gives a list of clumps similar to what one would get from a CLUMPFIND routine""" + """ + Recursively return clumps at the bottom of the index. 
+ This gives a list of clumps similar to what one would get from a CLUMPFIND routine + """ global counter counter = 0 list = [] diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index 80f041db515..3ac09205ad6 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -1121,7 +1121,8 @@ def _cond_ind(self): locals = self.locals.copy() if "obj" in locals: raise RuntimeError( - '"obj" has been defined in the "locals" ; this is not supported, please rename the variable.' + "'obj' has been defined in the 'locals' ;" + "this is not supported, please rename the variable." ) locals["obj"] = obj with obj._field_parameter_state(self.field_parameters): diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index d52825a3fdb..0913e0ee21d 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -284,13 +284,15 @@ def _is_valid(cls, *args, **kwargs): @abc.abstractmethod def _parse_parameter_file(self): # set up various attributes from self.parameter_filename - # see yt.frontends._skeleton.SkeletonDataset for a full description of what is required here + # for a full description of what is required here see + # yt.frontends._skeleton.SkeletonDataset pass @abc.abstractmethod def _set_code_unit_attributes(self): # set up code-units to physical units normalization factors - # see yt.frontends._skeleton.SkeletonDataset for a full description of what is required here + # for a full description of what is required here see + # yt.frontends._skeleton.SkeletonDataset pass def _set_derived_attrs(self): @@ -1579,8 +1581,9 @@ def add_gradient_fields(self, input_field): ('gas', 'temperature_gradient_z'), ('gas', 'temperature_gradient_magnitude')] - Note that the above example assumes ds.geometry == 'cartesian'. In general, the function - will create gradients components along the axes of the dataset coordinate system. + Note that the above example assumes ds.geometry == 'cartesian'. In general, + the function will create gradients components along the axes of the dataset + coordinate system. For instance, with cylindrical data, one gets 'temperature_gradient_' """ self.index diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index bcc0e9967b5..6ab907d3b44 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -473,8 +473,8 @@ def particle_trajectories( Note ---- - This function will fail if there are duplicate particle ids or if some of the particle - disappear. + This function will fail if there are duplicate particle ids or if some of the + particle disappear. 
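# A minimal sketch of the call this note applies to; the output pattern and the
# index selection below are made up for illustration.
import yt

ts = yt.DatasetSeries("DD????/DD????")
ad = ts[0].all_data()
indices = ad["all", "particle_index"].astype("int64")[:100]
trajs = ts.particle_trajectories(indices, fields=["particle_mass"])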
""" return ParticleTrajectories( self, indices, fields=fields, suppress_logging=suppress_logging, ptype=ptype diff --git a/yt/fields/fluid_fields.py b/yt/fields/fluid_fields.py index e71460d10c1..3aa583ac0eb 100644 --- a/yt/fields/fluid_fields.py +++ b/yt/fields/fluid_fields.py @@ -221,7 +221,8 @@ def setup_gradient_fields(registry, grad_field, field_units, slice_info=None): geom = registry.ds.geometry if is_curvilinear(geom): mylog.warning( - "In %s geometry, gradient fields may contain artifacts near cartesian axes.", + "In %s geometry, gradient fields may contain " + "artifacts near cartesian axes.", geom, ) diff --git a/yt/fields/interpolated_fields.py b/yt/fields/interpolated_fields.py index bce14c11e60..126699e7bc2 100644 --- a/yt/fields/interpolated_fields.py +++ b/yt/fields/interpolated_fields.py @@ -31,7 +31,8 @@ def add_interpolated_field( if len(axes_fields) != len(axes_data) or len(axes_fields) != len(table_data.shape): raise RuntimeError( - "Data dimension mismatch: data is %d, %d axes data provided, and %d axes fields provided." + "Data dimension mismatch: data is %d, " + "%d axes data provided, and %d axes fields provided." % (len(table_data.shape), len(axes_data), len(axes_fields)) ) diff --git a/yt/frontends/_skeleton/data_structures.py b/yt/frontends/_skeleton/data_structures.py index d0e6bf06000..73d7bd2acaf 100644 --- a/yt/frontends/_skeleton/data_structures.py +++ b/yt/frontends/_skeleton/data_structures.py @@ -140,7 +140,9 @@ def _parse_parameter_file(self): # self.unique_identifier <= unique identifier for the dataset # being read (e.g., UUID or ST_CTIME) (int) # - # self.geometry (defaults to 'cartesian') <= a lower case string ("cartesian", "polar", "cylindrical"...) + # self.geometry <= a lower case string + # ("cartesian", "polar", "cylindrical"...) + # (defaults to 'cartesian') pass @classmethod diff --git a/yt/frontends/_skeleton/io.py b/yt/frontends/_skeleton/io.py index 3fc8917a083..19c7ed8cdde 100644 --- a/yt/frontends/_skeleton/io.py +++ b/yt/frontends/_skeleton/io.py @@ -34,8 +34,9 @@ def _read_fluid_selection(self, chunks, selector, fields, size): # transpose is required (e.g., using np_array.transpose() or # np_array.swapaxes(0,2)). - # Note this method is not abstract, and has a default implementation in the base class. - # However, the default implementation requires that the method io_iter be defined + # This method is not abstract, and has a default implementation + # in the base class.However, the default implementation requires that the method + # io_iter be defined pass def _read_chunk_data(self, chunk, fields): diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 04bbf72960e..022bca290bf 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -49,7 +49,8 @@ def __init__( self.over_refine_factor = over_refine_factor if parent_ds is None: raise RuntimeError( - "The AdaptaHOP frontend requires a parent dataset to be passed as `parent_ds`." + "The AdaptaHOP frontend requires a parent dataset " + "to be passed as `parent_ds`." ) self.parent_ds = parent_ds @@ -178,8 +179,10 @@ class AdaptaHOPHaloContainer(YTSelectionContainer): -------- >>> import yt - >>> ds = yt.load('output_00080_halos/tree_bricks080', parent_ds=yt.load('output_00080/info_00080.txt')) - >>> + >>> ds = yt.load( + ... 'output_00080_halos/tree_bricks080', + ... parent_ds=yt.load('output_00080/info_00080.txt') + ... 
) >>> ds.halo(1, ptype='io') >>> print(halo.mass) 119.22804260253906 100000000000.0*Msun diff --git a/yt/frontends/adaptahop/io.py b/yt/frontends/adaptahop/io.py index 2a7d0419a38..556929a4034 100644 --- a/yt/frontends/adaptahop/io.py +++ b/yt/frontends/adaptahop/io.py @@ -209,7 +209,7 @@ def _get_particle_positions(self): # Make sure halos are loaded in increasing halo_id order assert np.all(np.diff(offset_map[:, 0]) > 0) - # Cache particle positions as one do not expect a (very) large number of halos anyway + # Cache particle positions as one do not expect a large number of halos anyway self._particle_positions = data self._offsets = offset_map return data diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 487745e37ae..8b4c814f759 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -86,7 +86,10 @@ def __init__(self, ds, dataset_type="amrvac"): super(AMRVACHierarchy, self).__init__(ds, dataset_type) def _detect_output_fields(self): - """Parse field names from datfile header, which is stored in self.dataset.parameters""" + """ + Parse field names from datfile header, as stored in self.dataset.parameters + + """ # required method self.field_list = [ (self.dataset_type, f) for f in self.dataset.parameters["w_names"] @@ -110,7 +113,8 @@ def _parse_index(self): ) self.block_offsets = block_offsets - # YT uses 0-based grid indexing, lowest level = 0 (AMRVAC uses 1 for lowest level) + # YT uses 0-based grid indexing: + # lowest level = 0, while AMRVAC uses 1 for lowest level ytlevels = np.array(vaclevels, dtype="int32") - 1 self.grid_levels.flat[:] = ytlevels self.min_level = np.min(ytlevels) @@ -174,12 +178,14 @@ def __init__( Either "cgs" (default), "mks" or "code" geometry_override : str, optional - A geometry flag formatted either according to either AMRVAC's or yt's standards. - When this parameter is passed along with v5 or more newer datfiles, will precede over - their internal "geometry" tag. + A geometry flag formatted either according to either + AMRVAC or yt standards. + When this parameter is passed along with v5 or more newer datfiles, + will precede over their internal "geometry" tag. parfiles : str or list, optional - One or more parfiles to be passed to yt.frontends.amrvac.read_amrvac_parfiles() + One or more parfiles to be passed to + yt.frontends.amrvac.read_amrvac_parfiles() """ # note: geometry_override and parfiles are specific to this frontend @@ -218,7 +224,8 @@ def __init__( e_is_internal = namelist["method_list"].get("solve_internal_e", False) if c_adiab is not None: - # this complicated unit is required for the adiabatic equation of state to make physical sense + # this complicated unit is required for the adiabatic equation + # of state to make physical sense c_adiab *= ( self.mass_unit ** (1 - self.gamma) * self.length_unit ** (2 + 3 * (self.gamma - 1)) @@ -271,7 +278,7 @@ def _parse_geometry(self, geometry_tag): Returns ------- geometry_yt : str - Lower case geometry tag among "cartesian", "polar", "cylindrical", "spherical". 
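Backing up to the ``c_adiab`` normalisation above: its exponents follow from
requiring that p = c_adiab * rho**gamma carry pressure units, i.e.
[c_adiab] = [p] / [rho]**gamma = mass**(1 - gamma) * length**(3*gamma - 1) / time**2.
A tiny sketch checking that the length exponent written in the code is that same
expression:

from fractions import Fraction

gamma = Fraction(5, 3)
assert 2 + 3 * (gamma - 1) == 3 * gamma - 1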
+ Lower case geometry tag "cartesian", "polar", "cylindrical" or "spherical" Examples -------- @@ -374,19 +381,20 @@ def _parse_parameter_file(self): self.omega_lambda = 0.0 self.hubble_constant = 0.0 - # units stuff =============================================================================== + # units stuff ====================================================================== def _set_code_unit_attributes(self): """Reproduce how AMRVAC internally set up physical normalisation factors.""" # required method # devnote: this method is never defined in the parent abstract class Dataset - # but it is called in Dataset.set_code_units(), which is part of Dataset.__init__() - # so it must be defined here. + # but it is called in Dataset.set_code_units(), which is part of + # Dataset.__init__() so it must be defined here. # devnote: this gets called later than Dataset._override_code_units() - # This is the reason why it uses setdefaultattr: it will only fill in the gaps left - # by the "override", instead of overriding them again. - # For the same reason, self.units_override is set, as well as corresponding *_unit instance attributes - # which may include up to 3 of the following items: length, time, mass, velocity, number_density, temperature + # This is the reason why it uses setdefaultattr: it will only fill in the gaps + # left by the "override", instead of overriding them again. + # For the same reason, self.units_override is set, as well as corresponding + # *_unit instance attributes which may include up to 3 of the following items: + # length, time, mass, velocity, number_density, temperature # note: yt sets hydrogen mass equal to proton mass, amrvac doesn't. mp_cgs = self.quan(1.672621898e-24, "g") # This value is taken from AstroPy @@ -402,7 +410,8 @@ def _set_code_unit_attributes(self): density_unit = mass_unit / length_unit ** 3 numberdensity_unit = density_unit / ((1.0 + 4.0 * He_abundance) * mp_cgs) else: - # other case: numberdensity is supplied. Fall back to one (default) if no overrides supplied + # other case: numberdensity is supplied. + # Fall back to one (default) if no overrides supplied numberdensity_override = self.units_override.get( "numberdensity_unit", (1, "cm**-3") ) @@ -423,12 +432,14 @@ def _set_code_unit_attributes(self): # in this case time was supplied velocity_unit = length_unit / self.time_unit else: - # other case: velocity was supplied. Fall back to None if no overrides supplied + # other case: velocity was supplied. + # Fall back to None if no overrides supplied velocity_unit = getattr(self, "velocity_unit", None) # 3. calculations for pressure and temperature if velocity_unit is None: - # velocity and time not given, see if temperature is given. Fall back to one (default) if not + # velocity and time not given, see if temperature is given. + # Fall back to one (default) if not temperature_unit = getattr(self, "temperature_unit", self.quan(1, "K")) pressure_unit = ( (2.0 + 3.0 * He_abundance) @@ -469,18 +480,18 @@ def _override_code_units(self): super(AMRVACDataset, self)._override_code_units() def _check_override_consistency(self): - """Check that keys in units_override are consistent with respect to AMRVAC's internal way to - set up normalisations factors. + """Check that keys in units_override are consistent with respect to AMRVAC's + internal way to set up normalisations factors. 
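One combination that passes this consistency check, mirroring the normalisations
used in the AMRVAC unit tests later in this patch (the datfile name here is made
up):

import yt

overrides = {
    "length_unit": (1e9, "cm"),
    "numberdensity_unit": (1e9, "cm**-3"),
    "temperature_unit": (1e6, "K"),
}
ds = yt.load("output0001.dat", units_override=overrides)
print(ds.length_unit, ds.mass_unit, ds.velocity_unit)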
""" # frontend specific method - # YT supports overriding other normalisations, this method ensures consistency between - # supplied 'units_override' items and those used by AMRVAC. + # YT supports overriding other normalisations, this method ensures consistency + # between supplied 'units_override' items and those used by AMRVAC. # AMRVAC's normalisations/units have 3 degrees of freedom. - # Moreover, if temperature unit is specified then velocity unit will be calculated - # accordingly, and vice-versa. - # Currently we replicate this by allowing a finite set of combinations in units_override + # Moreover, if temperature unit is specified then velocity unit will be + # calculated accordingly, and vice-versa. + # We replicate this by allowing a finite set of combinations in units_override if not self.units_override: return overrides = set(self.units_override) @@ -502,7 +513,8 @@ def _check_override_consistency(self): break else: raise ValueError( - "Combination {} passed to units_override is not consistent with AMRVAC. \n" + "Combination {} passed to units_override " + "is not consistent with AMRVAC. \n" "Allowed combinations are {}".format( overrides, ALLOWED_UNIT_COMBINATIONS ) diff --git a/yt/frontends/amrvac/datfile_utils.py b/yt/frontends/amrvac/datfile_utils.py index e0c3fda3511..ad65ba6f91f 100644 --- a/yt/frontends/amrvac/datfile_utils.py +++ b/yt/frontends/amrvac/datfile_utils.py @@ -105,8 +105,8 @@ def get_header(istream): def get_tree_info(istream): """ - Read levels, morton-curve indices, and byte offsets for each block as stored in the datfile - istream is an open datfile buffer with 'rb' mode + Read levels, morton-curve indices, and byte offsets for each block as stored in the + datfile istream is an open datfile buffer with 'rb' mode This can be used as the "first pass" data reading required by YT's interface. """ istream.seek(0) diff --git a/yt/frontends/amrvac/fields.py b/yt/frontends/amrvac/fields.py index edc9d7338a9..6313ed8f902 100644 --- a/yt/frontends/amrvac/fields.py +++ b/yt/frontends/amrvac/fields.py @@ -41,8 +41,9 @@ def _velocity(field, data, idir, prefix=None): mask1 = rho == 0 if mask1.any(): mylog.info( - "zeros found in %sdensity, patching them to compute corresponding velocity field.", - prefix, + "zeros found in %sdensity, " + "patching them to compute corresponding velocity field.", + prefix ) mask2 = moment == 0 if not ((mask1 & mask2) == mask1).all(): @@ -55,42 +56,34 @@ def _velocity(field, data, idir, prefix=None): code_moment = "code_mass / code_length**2 / code_time" code_pressure = "code_mass / code_length / code_time**2" -# for now, define a finite family of dust fields (up to 100 species, should be enough) -MAXN_DUST_SPECIES = 100 -known_dust_fields = [ - ("rhod%d" % idust, (code_density, ["dust%d_density" % idust], None)) - for idust in range(1, MAXN_DUST_SPECIES + 1) -] -for idir in (1, 2, 3): - known_dust_fields += [ + +class AMRVACFieldInfo(FieldInfoContainer): + # for now, define a finite family of dust fields (up to 100 species) + MAXN_DUST_SPECIES = 100 + known_dust_fields = [ + ("rhod%d" % idust, (code_density, ["dust%d_density" % idust], None)) + for idust in range(1, MAXN_DUST_SPECIES + 1) + ] + [ ( "m%dd%d" % (idir, idust), (code_moment, ["dust%d_moment_%d" % (idust, idir)], None), ) for idust in range(1, MAXN_DUST_SPECIES + 1) + for idir in (1, 2, 3) ] - - -class AMRVACFieldInfo(FieldInfoContainer): - # format: (native(?) 
field, (units, [aliases], display_name)) - # note: aliases will correspond to "gas" typed fields, whereas the native ones are "amrvac" typed - known_other_fields = tuple( - list( - ( - ("rho", (code_density, ["density"], None)), - ("m1", (code_moment, ["moment_1"], None)), - ("m2", (code_moment, ["moment_2"], None)), - ("m3", (code_moment, ["moment_3"], None)), - ("e", (code_pressure, ["energy_density"], None)), - ("b1", ("code_magnetic", ["magnetic_1"], None)), - ("b2", ("code_magnetic", ["magnetic_2"], None)), - ("b3", ("code_magnetic", ["magnetic_3"], None)), - ) - ) - + known_dust_fields - # in python3, there is no need for this tuple+list conversion, it suffices to write - # known_other_fields = (..., *known_dust_fields) + # note: aliases will correspond to "gas" typed fields + # whereas the native ones are "amrvac" typed + known_other_fields = ( + ("rho", (code_density, ["density"], None)), + ("m1", (code_moment, ["moment_1"], None)), + ("m2", (code_moment, ["moment_2"], None)), + ("m3", (code_moment, ["moment_3"], None)), + ("e", (code_pressure, ["energy_density"], None)), + ("b1", ("code_magnetic", ["magnetic_1"], None)), + ("b2", ("code_magnetic", ["magnetic_2"], None)), + ("b3", ("code_magnetic", ["magnetic_3"], None)), + *known_dust_fields, ) known_particle_fields = () @@ -128,12 +121,12 @@ def _setup_velocity_fields(self, idust=None): def _setup_dust_fields(self): idust = 1 + imax = self.__class__.MAXN_DUST_SPECIES while ("amrvac", "rhod%d" % idust) in self.field_list: - if idust > MAXN_DUST_SPECIES: + if idust > imax: mylog.error( "Only the first %d dust species are currently read by yt. " - "If you read this, please consider issuing a ticket. ", - MAXN_DUST_SPECIES, + "If you read this, please consider issuing a ticket. ", imax ) break self._setup_velocity_fields(idust) @@ -173,7 +166,8 @@ def setup_fluid_fields(self): self._setup_velocity_fields() # gas velocities self._setup_dust_fields() # dust derived fields (including velocities) - # fields with nested dependencies are defined thereafter by increasing level of complexity + # fields with nested dependencies are defined thereafter + # by increasing level of complexity us = self.ds.unit_system def _kinetic_energy_density(field, data): @@ -197,15 +191,17 @@ def _magnetic_energy_density(field, data): if not ("amrvac", f"b{idim}") in self.field_list: break emag += 0.5 * data["gas", f"magnetic_{idim}"] ** 2 - # important note: in AMRVAC the magnetic field is defined in units where mu0 = 1, + # in AMRVAC the magnetic field is defined in units where mu0 = 1, # such that # Emag = 0.5*B**2 instead of Emag = 0.5*B**2 / mu0 - # To correctly transform the dimensionality from gauss**2 -> rho*v**2, we have to - # take mu0 into account. If we divide here, units when adding the field should be - # us["density"]*us["velocity"]**2. If not, they should be us["magnetic_field"]**2 - # and division should happen elsewhere. + # To correctly transform the dimensionality from gauss**2 -> rho*v**2, + # we have to take mu0 into account. If we divide here, units when adding + # the field should be us["density"]*us["velocity"]**2. + # If not, they should be us["magnetic_field"]**2 and division should + # happen elsewhere. emag /= 4 * np.pi - # divided by mu0 = 4pi in cgs, yt handles 'mks' and 'code' unit systems internally. + # divided by mu0 = 4pi in cgs, + # yt handles 'mks' and 'code' unit systems internally. return emag self.add_field( @@ -218,12 +214,12 @@ def _magnetic_energy_density(field, data): # Adding the thermal pressure field. 
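# Side note on the mu0 = 4*pi division above: in Gaussian/cgs units B**2 already
# carries energy-density dimensions, which a quick unyt check should confirm
# (value quoted approximately):
import numpy as np
import unyt

B = unyt.unyt_quantity(1.0, "gauss")
emag = 0.5 * B**2 / (4 * np.pi)
print(emag.to("erg/cm**3"))  # roughly 0.0398 erg/cm**3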
# In AMRVAC we have multiple physics possibilities: - # - if HD/MHD + energy equation, pressure is (gamma-1)*(e - ekin (- emag)) for (M)HD - # - if HD/MHD but solve_internal_e is true in parfile, pressure is (gamma-1)*e for both - # - if (m)hd_energy is false in parfile (isothermal), pressure is c_adiab * rho**gamma + # - if HD/MHD + energy equation P = (gamma-1)*(e - ekin (- emag)) for (M)HD + # - if HD/MHD but solve_internal_e is true in parfile, P = (gamma-1)*e for both + # - if (m)hd_energy is false in parfile (isothermal), P = c_adiab * rho**gamma def _full_thermal_pressure_HD(field, data): - # important note : energy density and pressure are actually expressed in the same unit + # energy density and pressure are actually expressed in the same unit pthermal = (data.ds.gamma - 1) * ( data["gas", "energy_density"] - data["gas", "kinetic_energy_density"] ) @@ -257,7 +253,8 @@ def _adiabatic_thermal_pressure(field, data): pressure_recipe = _adiabatic_thermal_pressure mylog.info("Using adiabatic EoS for thermal pressure (isothermal).") mylog.warning( - "If you used usr_set_pthermal you should redefine the thermal_pressure field." + "If you used usr_set_pthermal you should " + "redefine the thermal_pressure field." ) if pressure_recipe is not None: diff --git a/yt/frontends/amrvac/io.py b/yt/frontends/amrvac/io.py index 39ed1d50e62..df420c5e9e6 100644 --- a/yt/frontends/amrvac/io.py +++ b/yt/frontends/amrvac/io.py @@ -89,15 +89,17 @@ def _read_fluid_selection(self, chunks, selector, fields, size): Returns ------- data_dict : dict - keys are the (ftype, fname) tuples and values are arrays that have been masked using - whatever selector method is appropriate. Arrays have dtype float64. + keys are the (ftype, fname) tuples, values are arrays that have been masked + using whatever selector method is appropriate. Arrays have dtype float64. """ # @Notes from Niels: # The chunks list has YTDataChunk objects containing the different grids. - # The list of grids can be obtained by doing eg. grids_list = chunks[0].objs or chunks[1].objs etc. - # Every element in "grids_list" is then an AMRVACGrid object, and has hence all attributes of a grid - # (Level, ActiveDimensions, LeftEdge, etc.) + # The list of grids can be obtained by doing eg. + # grids_list = chunks[0].objs or chunks[1].objs etc. + # Every element in "grids_list" is then an AMRVACGrid object, + # and has hence all attributes of a grid : + # (Level, ActiveDimensions, LeftEdge, etc.) chunks = list(chunks) data_dict = {} # <- return variable diff --git a/yt/frontends/amrvac/tests/test_outputs.py b/yt/frontends/amrvac/tests/test_outputs.py index cac74c69903..0aae58e6bb9 100644 --- a/yt/frontends/amrvac/tests/test_outputs.py +++ b/yt/frontends/amrvac/tests/test_outputs.py @@ -138,8 +138,8 @@ def test_rmi_cartesian_dust_2D(): yield test -# Tests for units: verify that overriding certain units yields the correct derived units. 
-# The following are correct normalisations based on length, numberdensity and temperature +# Tests for units: verify that overriding certain units yields the correct derived units +# The following are correct normalisations based on length, numberdensity and temp length_unit = (1e9, "cm") numberdensity_unit = (1e9, "cm**-3") temperature_unit = (1e6, "K") diff --git a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py index 13d0e0bb07f..4f6fc4b5cd6 100644 --- a/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py +++ b/yt/frontends/amrvac/tests/test_read_amrvac_namelist.py @@ -12,7 +12,8 @@ @requires_module("f90nml") def test_read_one_file(): - """when provided a single file, the function should merely act as a wrapper for f90nml.read()""" + """when provided a single file, the function should merely act + as a wrapper for f90nml.read()""" namelist1 = read_amrvac_namelist(blast_wave_parfile) namelist2 = f90nml.read(blast_wave_parfile) assert namelist1 == namelist2 @@ -20,7 +21,8 @@ def test_read_one_file(): @requires_module("f90nml") def test_accumulate_basename(): - """When two (or more) parfiles are passed, the filelist:base_filename should be special-cased""" + """When two (or more) parfiles are passed, + the filelist:base_filename should be special-cased""" namelist_base = f90nml.read(blast_wave_parfile) namelist_update = f90nml.read(modifier_parfile) diff --git a/yt/frontends/athena/data_structures.py b/yt/frontends/athena/data_structures.py index 783730a6718..df26ae5881b 100644 --- a/yt/frontends/athena/data_structures.py +++ b/yt/frontends/athena/data_structures.py @@ -490,8 +490,9 @@ def __init__( if k.endswith("_unit") and k not in units_override: if not already_warned: mylog.warning( - "Supplying unit conversions from the parameters dict is deprecated, " - "and will be removed in a future release. Use units_override instead." + "Supplying unit conversions from the parameters dict " + "is deprecated, and will be removed in a future release. " + "Use units_override instead." ) already_warned = True units_override[k] = self.specified_parameters.pop(k) diff --git a/yt/frontends/boxlib/data_structures.py b/yt/frontends/boxlib/data_structures.py index 1e8bafd3cb8..779d9552bec 100644 --- a/yt/frontends/boxlib/data_structures.py +++ b/yt/frontends/boxlib/data_structures.py @@ -471,18 +471,21 @@ def _cache_endianness(self, test_grid): _header_pattern[self.dimensionality - 1].search(header).groups() ) # Note that previously we were using a different value for BPR than we - # use now. Here is an example set of information directly from BoxLib: - # * DOUBLE data - # * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27 - # * FLOAT data - # * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27 + # use now. Here is an example set of information directly from BoxLib + """ + * DOUBLE data + * FAB ((8, (64 11 52 0 1 12 0 1023)),(8, (1 2 3 4 5 6 7 8)))((0,0) (63,63) (0,0)) 27 # NOQA: E501 + * FLOAT data + * FAB ((8, (32 8 23 0 1 9 0 127)),(4, (1 2 3 4)))((0,0) (63,63) (0,0)) 27 + """ if bpr == endian[0]: dtype = f"f{bpr}" else: raise ValueError( - "FAB header is neither big nor little endian. Perhaps the file is corrupt?" + "FAB header is neither big nor little endian. " + "Perhaps the file is corrupt?" 
) mylog.debug("FAB header suggests dtype of %s", dtype) @@ -721,10 +724,10 @@ def _parse_cparams(self): if param == "amr.n_cell": vals = self.domain_dimensions = np.array(vals.split(), dtype="int32") - # For 1D and 2D simulations in BoxLib usually only the relevant dimensions - # have a specified number of zones, but yt requires domain_dimensions to - # have three elements, with 1 in the additional slots if we're not in 3D, - # so append them as necessary. + # For 1D and 2D simulations in BoxLib usually only the relevant + # dimensions have a specified number of zones, but yt requires + # domain_dimensions to have three elements, with 1 in the additional + # slots if we're not in 3D, so append them as necessary. if len(vals) == 1: vals = self.domain_dimensions = np.array([vals[0], 1, 1]) @@ -1157,8 +1160,10 @@ def _parse_parameter_file(self): p, v = line.strip().split(":") self.parameters[p] = v.strip() if "git describe" in line or "git hash" in line: - # Castro release 17.02 and later - line format: codename git describe: the-hash - # Castro before release 17.02 - line format: codename git hash: the-hash + # Castro release 17.02 and later + # line format: codename git describe: the-hash + # Castro before release 17.02 + # line format: codename git hash: the-hash fields = line.split(":") self.parameters[fields[0]] = fields[1].strip() line = next(f) diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 1f6db1d6b5c..4b25514b8aa 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -573,7 +573,8 @@ def _calculate_simulation_bounds(self): def _set_parameter_defaults(self): """ - Set some default parameters to avoid problems if they are not in the parameter file. + Set some default parameters to avoid problems + if they are not in the parameter file. """ self.parameters["GlobalDir"] = self.directory diff --git a/yt/frontends/gadget/data_structures.py b/yt/frontends/gadget/data_structures.py index 862aa86e29a..1e4d1c64f86 100644 --- a/yt/frontends/gadget/data_structures.py +++ b/yt/frontends/gadget/data_structures.py @@ -267,7 +267,7 @@ def __init__( "Otherwise something is wrong, " "and you might want to check how the dataset is loaded. 
" "Futher information about header specification can be found in " - "https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.", + "https://yt-project.org/docs/dev/examining/loading_data.html#header-specification.", # NOQA E501 header_size, ) self._field_spec = self._setup_binary_spec(field_spec, gadget_field_specs) @@ -646,7 +646,8 @@ def _set_owls_eagle_units(self): # note the contents of the HDF5 Units group are in _unit_base # note the velocity stored on disk is sqrt(a) dx/dt - # physical velocity [cm/s] = a dx/dt = sqrt(a) * velocity_on_disk * UnitVelocity_in_cm_per_s + # physical velocity [cm/s] = a dx/dt + # = sqrt(a) * velocity_on_disk * UnitVelocity_in_cm_per_s self.length_unit = self.quan(self._unit_base["UnitLength_in_cm"], "cmcm/h") self.mass_unit = self.quan(self._unit_base["UnitMass_in_g"], "g/h") self.velocity_unit = self.quan( diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index 9f6ad62032e..101ecff4a54 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -179,14 +179,21 @@ def _validate_parent_children_relationship(self): # edges between children and parent for c in grid.Children: for d in range(0, 3): - assert grid.LeftEdge[d] <= c.LeftEdge[d], ( - "Grid %d, Children %d, Grid->EdgeL %14.7e, Children->EdgeL %14.7e" + msgL = ( + "Grid %d, Child %d, Grid->EdgeL %14.7e, Children->EdgeL %14.7e" % (grid.id, c.id, grid.LeftEdge[d], c.LeftEdge[d]) ) - assert grid.RightEdge[d] >= c.RightEdge[d], ( - "Grid %d, Children %d, Grid->EdgeR %14.7e, Children->EdgeR %14.7e" + msgR = ( + "Grid %d, Child %d, Grid->EdgeR %14.7e, Children->EdgeR %14.7e" % (grid.id, c.id, grid.RightEdge[d], c.RightEdge[d]) ) + if not grid.LeftEdge[d] <= c.LeftEdge[d]: + + raise ValueError(msgL) + + if not grid.RightEdge[d] >= c.RightEdge[d]: + raise ValueError(msgR) + mylog.info("Check passed") @@ -342,7 +349,7 @@ def _parse_parameter_file(self): else: self.mhd = 0 - # old data format (version < 2210) does not contain any information of code units + # old data format (version < 2210) did not contain any information of code units self.parameters.setdefault("Opt__Unit", 0) self.geometry = geometry_parameters[parameters.get("Coordinate", 1)] diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index d910b6b70db..5e82529cf4b 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -29,8 +29,8 @@ class OpenPMDGrid(AMRGridPatch): """Represents chunk of data on-disk. This defines the index and offset for every mesh and particle type. - It also defines parents and children grids. Since openPMD does not have multiple levels of refinement, - there are no parents or children for any grid. + It also defines parents and children grids. Since openPMD does not have multiple + levels of refinement there are no parents or children for any grid. """ _id_offset = 0 @@ -112,8 +112,9 @@ def _get_particle_type_counts(self): def _detect_output_fields(self): """Populates ``self.field_list`` with native fields (mesh and particle) on disk. - Each entry is a tuple of two strings. The first element is the on-disk fluid type or particle type. - The second element is the name of the field in yt. This string is later used for accessing the data. + Each entry is a tuple of two strings. The first element is the on-disk fluid + type or particle type. The second element is the name of the field in yt. 
+ This string is later used for accessing the data. Convention suggests that the on-disk fluid type should be "openPMD", the on-disk particle type (for a single species of particles) is "io" or (for multiple species of particles) the particle name on-disk. @@ -152,8 +153,8 @@ def _detect_output_fields(self): ) elif "particlePatches" not in recname: try: - # Create a field for every axis (x,y,z) of every property (position) - # of every species (electrons) + # Create a field for every axis (x,y,z) of every + # property (position) of every species (electrons) axes = list(record.keys()) if str(recname) == "position": recname = "positionCoarse" @@ -176,7 +177,8 @@ def _detect_output_fields(self): else: pass if len(list(particles.keys())) > 1: - # There is more than one particle species, use the specific names as field types + # There is more than one particle species, + # use the specific names as field types self.field_list.extend( [ ( @@ -386,7 +388,8 @@ def _parse_index(self): particle_names = [] for (pname, size) in self.numparts.items(): if size == count: - # Since this is not part of a particlePatch, we can include multiple same-sized ptypes + # Since this is not part of a particlePatch, + # we can include multiple same-sized ptypes particle_names.append(str(pname)) handled_ptypes.append(str(pname)) else: @@ -426,9 +429,10 @@ class OpenPMDDataset(Dataset): Notes ----- - It is assumed that all meshes cover the same region. Their resolution can be different. - It is assumed that all particles reside in this same region exclusively. - It is assumed that the particle and mesh positions are *absolute* with respect to the simulation origin. + It is assumed that + - all meshes cover the same region. Their resolution can be different. + - all particles reside in this same region exclusively. + - particle and mesh positions are *absolute* with respect to the simulation origin. """ _index_class = OpenPMDHierarchy @@ -535,7 +539,8 @@ def _set_code_unit_attributes(self): """Handle conversion between different physical units and the code units. Every dataset in openPMD can have different code <-> physical scaling. - The individual factor is obtained by multiplying with "unitSI" reading getting data from disk. + The individual factor is obtained by multiplying with "unitSI" reading getting + data from disk. """ setdefaultattr(self, "length_unit", self.quan(1.0, "m")) setdefaultattr(self, "mass_unit", self.quan(1.0, "kg")) diff --git a/yt/frontends/open_pmd/fields.py b/yt/frontends/open_pmd/fields.py index 7063483ba24..2eb8edf89c9 100644 --- a/yt/frontends/open_pmd/fields.py +++ b/yt/frontends/open_pmd/fields.py @@ -134,7 +134,7 @@ class OpenPMDFieldInfo(FieldInfoContainer): References ---------- * http://yt-project.org/docs/dev/analyzing/fields.html - * http://yt-project.org/docs/dev/developing/creating_frontend.html#data-meaning-structures + * http://yt-project.org/docs/dev/developing/creating_frontend.html#data-meaning-structures # NOQA E501 * https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md * [1] http://yt-project.org/docs/dev/reference/field_list.html#universal-fields """ @@ -152,7 +152,8 @@ def __init__(self, ds, field_list): for fname in fields.keys(): field = fields[fname] if isinstance(field, h5.Dataset) or is_const_component(field): - # Don't consider axes. This appears to be a vector field of single dimensionality + # Don't consider axes. 
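# For orientation, the fluid/particle naming convention described above looks
# roughly like this from the user side; the file name is made up and the exact
# field names depend on what the output contains.
import yt

ds = yt.load("simData_00000100.h5")
mesh_fields = [f for f in ds.field_list if f[0] == "openPMD"]
particle_fields = [f for f in ds.field_list if f[0] != "openPMD"]
print(mesh_fields[:3], particle_fields[:3])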
+ # This appears to be a vector field of single dimensionality ytname = str("_".join([fname.replace("_", "-")])) parsed = parse_unit_dimension( np.asarray(field.attrs["unitDimension"], dtype=np.int) @@ -195,7 +196,7 @@ def __init__(self, ds, field_list): if ytattrib == "position": # Symbolically rename position to preserve yt's # interpretation of the pfield particle_position is later - # derived in setup_absolute_positions in the way yt expects it + # derived in setup_absolute_positions in the way yt expects ytattrib = "positionCoarse" if isinstance(record, h5.Dataset) or is_const_component(record): name = ["particle", ytattrib] @@ -213,9 +214,8 @@ def __init__(self, ds, field_list): except (KeyError): if recname != "particlePatches": mylog.info( - "open_pmd - %s_%s does not seem to have unitDimension", - pname, - recname, + "open_pmd - %s_%s does not seem to have " + "unitDimension", pname, recname ) for i in self.known_particle_fields: mylog.debug("open_pmd - known_particle_fields - %s", i) @@ -237,7 +237,8 @@ def setup_fluid_fields(self): def setup_particle_fields(self, ptype): """Defines which derived particle fields to create. - This will be called for every entry in `OpenPMDDataset``'s ``self.particle_types``. + This will be called for every entry in + `OpenPMDDataset``'s ``self.particle_types``. If a field can not be calculated, it will simply be skipped. """ setup_absolute_positions(self, ptype) diff --git a/yt/frontends/open_pmd/io.py b/yt/frontends/open_pmd/io.py index e457e2b06ca..a6527fe013c 100644 --- a/yt/frontends/open_pmd/io.py +++ b/yt/frontends/open_pmd/io.py @@ -55,7 +55,7 @@ def _fill_cache(self, ptype, index=0, offset=None): self.cache[i] = np.zeros(offset) def _read_particle_selection(self, chunks, selector, fields): - """Reads given particle fields for given particle species masked by a given selection. + """Read particle fields for particle species masked by a selection. Parameters ---------- @@ -63,8 +63,8 @@ def _read_particle_selection(self, chunks, selector, fields): A list of chunks A chunk is a list of grids selector - A region (inside your domain) specifying which parts of the field you want to read - See [1] and [2] + A region (inside your domain) specifying which parts of the field + you want to read. See [1] and [2] fields : array_like Tuples (ptype, pfield) representing a field @@ -89,7 +89,8 @@ def _read_particle_selection(self, chunks, selector, fields): for (ptype, pname) in fields: pfield = (ptype, pname) - # Overestimate the size of all pfields so they include all particles, shrink it later + # Overestimate the size of all pfields so they include all particles + # and shrink it later particle_count[pfield] = 0 if ptype in unions: for pt in unions[ptype]: @@ -147,8 +148,8 @@ def _read_fluid_selection(self, chunks, selector, fields, size): A list of chunks A chunk is a list of grids selector - A region (inside your domain) specifying which parts of the field you want to read - See [1] and [2] + A region (inside your domain) specifying which parts of the field + you want to read. 
See [1] and [2] fields : array_like Tuples (fname, ftype) representing a field size : int diff --git a/yt/frontends/open_pmd/misc.py b/yt/frontends/open_pmd/misc.py index 3435add43ab..87da1304c62 100644 --- a/yt/frontends/open_pmd/misc.py +++ b/yt/frontends/open_pmd/misc.py @@ -9,7 +9,8 @@ def parse_unit_dimension(unit_dimension): Parameters ---------- unit_dimension : array_like - integer array of length 7 with one entry for the dimensional component of every SI unit + integer array of length 7 with one entry for the dimensional component of every + SI unit [0] length L, [1] mass M, @@ -21,7 +22,9 @@ def parse_unit_dimension(unit_dimension): References ---------- - .. https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#unit-systems-and-dimensionality + .. + https://github.com/openPMD/openPMD-standard/blob/latest/STANDARD.md#unit-systems-and-dimensionality # NOQA E501 + Returns ------- @@ -101,17 +104,9 @@ def get_component(group, component_name, index=0, offset=None): else: shape[0] = offset # component is constant, craft an array by hand - # mylog.debug( - # "open_pmd - get_component: {}/{} [const {}]".format(group.name, component_name, shape) - # ) return np.full(shape, record_component.attrs["value"] * unit_si) else: if offset is not None: offset += index # component is a dataset, return it (possibly masked) - # mylog.debug( - # "open_pmd - get_component: {}/{}[{}:{}]".format( - # group.name, component_name, index, offset - # ) - # ) return np.multiply(record_component[index:offset], unit_si) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 4be5ba45766..429e3af3608 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -221,8 +221,8 @@ def __init__( self._base_domain = base_domain elif num_ghost_zones < 0: raise RuntimeError( - "Cannot initialize a domain subset with a negative number of ghost zones," - " was called with num_ghost_zones=%s" % num_ghost_zones + "Cannot initialize a domain subset with a negative number " + "of ghost zones, was called with num_ghost_zones=%s" % num_ghost_zones ) def _fill_no_ghostzones(self, fd, fields, selector, file_handler): @@ -569,8 +569,8 @@ def __init__( if group_folder == "group_00001": # Count the number of groups - # note: we exclude the unlikely event that one of the group is actually a file - # instad of a folder + # note: we exclude the unlikely event that one of the group is actually a + # file instad of a folder self.num_groups = len( [ _ @@ -632,8 +632,8 @@ def _sanitize_max_level(max_level, max_level_convention): # Check max_level_convention is set and acceptable if max_level_convention is None: raise ValueError( - "You specified `max_level` without specifying any `max_level_convention`. " - "You have to pick either 'yt' or 'ramses'." + f"Received `max_level`={max_level}, but no `max_level_convention`. " + "Valid conventions are 'yt' and 'ramses'." 
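# Usage sketch for the two conventions validated above, reusing the sample
# RAMSES path that appears elsewhere in this series; the level values are
# arbitrary.
import yt

# "yt" convention: 0 is the coarsest level exposed by yt
ds = yt.load("output_00080/info_00080.txt", max_level=2, max_level_convention="yt")
# "ramses" convention: levels counted the way RAMSES does internally (levelmin and up)
ds = yt.load("output_00080/info_00080.txt", max_level=12, max_level_convention="ramses")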
) if max_level_convention not in ("ramses", "yt"): raise ValueError( @@ -752,7 +752,8 @@ def read_rhs(f, cast): self.domain_left_edge = np.zeros(3, dtype="float64") self.domain_dimensions = np.ones(3, dtype="int32") * 2 ** (self.min_level + 1) self.domain_right_edge = np.ones(3, dtype="float64") - # This is likely not true, but it's not clear how to determine the boundary conditions + # This is likely not true, but it's not clear + # how to determine the boundary conditions self.periodicity = (True, True, True) if self.force_cosmological is not None: diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 04f6acd7c21..7460119a50c 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -341,7 +341,8 @@ def detect_fields(cls, ds): else: if nvar < 5: mylog.debug( - "nvar=%s is too small! YT doesn't currently support 1D/2D runs in RAMSES %s" + "nvar=%s is too small! YT doesn't currently " + "support 1D/2D runs in RAMSES %s" ) raise ValueError # Basic hydro runs @@ -362,7 +363,8 @@ def detect_fields(cls, ds): "Pressure", "Metallicity", ] - # MHD runs - NOTE: THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE + # MHD runs - NOTE: + # THE MHD MODULE WILL SILENTLY ADD 3 TO THE NVAR IN THE MAKEFILE if nvar == 11: fields = [ "Density", @@ -500,8 +502,8 @@ def read_rhs(cast): read_rhs(float) # Read rt_c_frac - # Note: when using variable speed of light, this line will contain multiple values - # corresponding the the velocity at each level + # Note: when using variable speed of light, this line will contain multiple + # values corresponding the the velocity at each level read_rhs(lambda line: [float(e) for e in line.split()]) f.readline() diff --git a/yt/frontends/ramses/fields.py b/yt/frontends/ramses/fields.py index bbea80c2d0a..1b2d9d7216d 100644 --- a/yt/frontends/ramses/fields.py +++ b/yt/frontends/ramses/fields.py @@ -368,10 +368,11 @@ def _func(field, data): for i, (tname, unit) in enumerate(_cool_arrs): var = fd.read_vector("d") if var.size == n1 and i == 0: - # If this case occurs, the cooling files were produced pre-2010 in a format - # that is no longer supported + # If this case occurs, the cooling files were produced pre-2010 in + # a format that is no longer supported mylog.warning( - "This cooling file format is no longer supported. Cooling field loading skipped." + "This cooling file format is no longer supported. " + "Cooling field loading skipped." 
) return if var.size == n1 * n2: diff --git a/yt/frontends/sdf/tests/test_outputs.py b/yt/frontends/sdf/tests/test_outputs.py index 0edb47b7c30..5624bbbb76e 100644 --- a/yt/frontends/sdf/tests/test_outputs.py +++ b/yt/frontends/sdf/tests/test_outputs.py @@ -13,7 +13,8 @@ scivis_data = ncsa_scivis_data # Answer on http://stackoverflow.com/questions/3764291/checking-network-connection -# Better answer on http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python +# Better answer on +# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python def internet_on(): try: urllib.request.urlopen(scivis_data, timeout=1) diff --git a/yt/frontends/stream/io.py b/yt/frontends/stream/io.py index 626ade00278..d505624c831 100644 --- a/yt/frontends/stream/io.py +++ b/yt/frontends/stream/io.py @@ -17,9 +17,6 @@ def __init__(self, ds): def _read_data_set(self, grid, field): # This is where we implement processor-locking - # if grid.id not in self.grids_in_memory: - # mylog.error("Was asked for %s but I have %s", grid.id, self.grids_in_memory.keys()) - # raise KeyError tr = self.fields[grid.id][field] # If it's particles, we copy. if len(tr.shape) == 1: diff --git a/yt/frontends/tipsy/io.py b/yt/frontends/tipsy/io.py index 416addc5e4f..7010c6813cd 100644 --- a/yt/frontends/tipsy/io.py +++ b/yt/frontends/tipsy/io.py @@ -420,8 +420,8 @@ def _identify_fields(self, data_file): return self._field_list, {} def _calculate_particle_offsets(self, data_file, pcounts): - # This computes the offsets for each particle type into a "data_file." Note that - # the term "data_file" here is a bit overloaded, and also refers to a + # This computes the offsets for each particle type into a "data_file." + # Note that the term "data_file" here is a bit overloaded, and also refers to a # "chunk" of particles inside a data file. # data_file.start represents the *particle count* that we should start at. 
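# A minimal sketch of the offset bookkeeping described above, assuming a
# hypothetical back-to-back layout and made-up per-type record sizes; this is
# illustrative only and is not the actual tipsy file format:

def _toy_particle_offsets(pcounts, record_bytes, header_bytes=32):
    # Each particle type starts where the previous one ended; data_file.start
    # and data_file.end then select a *particle count* range within each type.
    offsets = {}
    position = header_bytes
    for ptype, count in pcounts.items():
        offsets[ptype] = position
        position += count * record_bytes[ptype]
    return offsets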
# diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 196acf3e37e..56a5c254f14 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -290,7 +290,8 @@ def _find_field_values_at_points(self, fields, coords): def _find_points(self, x, y, z): """ - Returns the (objects, indices) of leaf grids containing a number of (x,y,z) points + Returns the (objects, indices) of leaf grids + containing a number of (x,y,z) points """ x = ensure_numpy_array(x) y = ensure_numpy_array(y) diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index 68c7da29ceb..25bcaac2bf5 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -20,9 +20,10 @@ def test_load_nonexistent_data(): FileNotFoundError, simulation, os.path.join(tmpdir, "not_a_file"), "Enzo" ) - # this one is a design choice: it is preferable to report the most important - # problem in an error message (missing data is worse than a typo in - # simulation_type), so we make sure the error raised is not YTSimulationNotIdentified + # this one is a design choice: + # it is preferable to report the most important problem in an error message + # (missing data is worse than a typo insimulation_type) + # so we make sure the error raised is not YTSimulationNotIdentified assert_raises( FileNotFoundError, simulation, diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 11503378d78..c3d5a37ed51 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -608,7 +608,8 @@ def __call__(self, parser, namespace, values, option_string=None): type=float, dest="halo_radius", default=0.1, - help="Constant radius for profiling halos if using hop output files with no radius entry. Default: 0.1.", + help="Constant radius for profiling halos if using hop output files with no " + + "radius entry. Default: 0.1.", ), halo_radius_units=dict( longname="--halo_radius_units", @@ -616,7 +617,8 @@ def __call__(self, parser, namespace, values, option_string=None): type=str, dest="halo_radius_units", default="1", - help="Units for radius used with --halo_radius flag. Default: '1' (code units).", + help="Units for radius used with --halo_radius flag. " + + "Default: '1' (code units).", ), halo_hop_style=dict( longname="--halo_hop_style", @@ -624,7 +626,8 @@ def __call__(self, parser, namespace, values, option_string=None): type=str, dest="halo_hop_style", default="new", - help="Style of hop output file. 'new' for yt_hop files and 'old' for enzo_hop files.", + help="Style of hop output file. " + + "'new' for yt_hop files and 'old' for enzo_hop files.", ), halo_dataset=dict( longname="--halo_dataset", diff --git a/yt/utilities/cosmology.py b/yt/utilities/cosmology.py index 1071804bb05..ef85aa8fc53 100644 --- a/yt/utilities/cosmology.py +++ b/yt/utilities/cosmology.py @@ -625,8 +625,9 @@ def get_dark_factor(self, z): note that there's a typo in his eq. There should be no negative sign). At the moment, this only works using the parameterization given in Linder 2002 - eq. 7: w(a) = w0 + wa(1 - a) = w0 + wa * z / (1+z). This gives rise to an analytic - expression. It is also only functional for Gadget simulations, at the moment. + eq. 7: w(a) = w0 + wa(1 - a) = w0 + wa * z / (1+z). This gives rise to an + analytic expression. + It is also only functional for Gadget simulations, at the moment. 
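    For orientation, a minimal sketch of this parameterization; the closed form
    below is the standard result for this w(a) and is shown as an illustration,
    not as the exact expression implemented here:

        import numpy as np

        def w_of_z(z, w0, wa):
            # Linder 2002, eq. 7: w(a) = w0 + wa * (1 - a), with a = 1 / (1 + z)
            return w0 + wa * z / (1.0 + z)

        def dark_energy_density_factor(z, w0, wa):
            # rho_DE(z) / rho_DE(0) for the same w(a)
            a = 1.0 / (1.0 + z)
            return a ** (-3.0 * (1.0 + w0 + wa)) * np.exp(-3.0 * wa * (1.0 - a))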
Parameters ---------- diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index fb5b98b4e95..b610e167773 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -176,8 +176,8 @@ def __init__(self, ds): def __str__(self): return ( - "Simulation %s has no stopping condition. StopTime or StopCycle should be set." - % self.ds + "Simulation %s has no stopping condition. " + "StopTime or StopCycle should be set." % self.ds ) diff --git a/yt/utilities/linear_interpolators.py b/yt/utilities/linear_interpolators.py index 1b69e1edfd4..011bbcede39 100644 --- a/yt/utilities/linear_interpolators.py +++ b/yt/utilities/linear_interpolators.py @@ -197,7 +197,8 @@ def __init__(self, table, boundaries, field_names, truncate=False): self.z_bins = boundaries[2] else: mylog.error( - "Boundaries must be given as (x0, x1, y0, y1, z0, z1) or as (x_bins, y_bins, z_bins)" + "Boundaries must be given as (x0, x1, y0, y1, z0, z1) " + "or as (x_bins, y_bins, z_bins)" ) raise ValueError diff --git a/yt/utilities/math_utils.py b/yt/utilities/math_utils.py index ef95df8a762..cb7ea01632c 100644 --- a/yt/utilities/math_utils.py +++ b/yt/utilities/math_utils.py @@ -165,9 +165,16 @@ def periodic_ray(start, end, left=None, right=None): >>> start = yt.YTArray([0.5, 0.5, 0.5]) >>> end = yt.YTArray([1.25, 1.25, 1.25]) >>> periodic_ray(start, end) - [[YTArray([0.5, 0.5, 0.5]) (dimensionless), YTArray([1., 1., 1.]) (dimensionless)], - [YTArray([0., 0., 0.]) (dimensionless), YTArray([0.25, 0.25, 0.25]) (dimensionless)]] - + [ + [ + YTArray([0.5, 0.5, 0.5]) (dimensionless), + YTArray([1., 1., 1.]) (dimensionless) + ], + [ + YTArray([0., 0., 0.]) (dimensionless), + YTArray([0.25, 0.25, 0.25]) (dimensionless) + ] + ] """ if left is None: diff --git a/yt/utilities/orientation.py b/yt/utilities/orientation.py index 00b101adb7c..c536b6a38d2 100644 --- a/yt/utilities/orientation.py +++ b/yt/utilities/orientation.py @@ -70,8 +70,9 @@ def _setup_normalized_vectors(self, normal_vector, north_vector): t = np.cross(normal_vector, vecs).sum(axis=1) ax = t.argmax() east_vector = np.cross(vecs[ax, :], normal_vector).ravel() - # self.north_vector must remain None otherwise rotations about a fixed axis will break. - # The north_vector calculated here will still be included in self.unit_vectors. + # self.north_vector must remain None otherwise rotations about a fixed axis + # will break. The north_vector calculated here will still be included + # in self.unit_vectors. north_vector = np.cross(normal_vector, east_vector).ravel() else: if self.steady_north or (np.dot(north_vector, normal_vector) != 0.0): diff --git a/yt/utilities/particle_generator.py b/yt/utilities/particle_generator.py index d33511f9a58..ab3a9b35b04 100644 --- a/yt/utilities/particle_generator.py +++ b/yt/utilities/particle_generator.py @@ -150,7 +150,7 @@ def map_grid_fields_to_particles(self, mapping_dict): Examples -------- >>> field_map = {'density':'particle_density', - >>> 'temperature':'particle_temperature'} + ... 'temperature':'particle_temperature'} >>> particles.map_grid_fields_to_particles(field_map) """ pbar = get_pbar("Mapping fields to particles", self.num_grids) @@ -245,7 +245,7 @@ def __init__(self, ds, num_particles, data, ptype="io"): >>> posz = np.random.random((num_p)) >>> mass = np.ones((num_p)) >>> data = {'particle_position_x': posx, 'particle_position_y': posy, - >>> 'particle_position_z': posz, 'particle_mass': mass} + ... 
'particle_position_z': posz, 'particle_mass': mass} >>> particles = FromListParticleGenerator(ds, num_p, data) """ @@ -308,8 +308,8 @@ def __init__( >>> le = np.array([0.25,0.25,0.25]) >>> re = np.array([0.75,0.75,0.75]) >>> fields = ["particle_position_x","particle_position_y", - >>> "particle_position_z", - >>> "particle_density","particle_temperature"] + ... "particle_position_z", + ... "particle_density","particle_temperature"] >>> particles = LatticeParticleGenerator(ds, dims, le, re, fields) """ @@ -384,8 +384,13 @@ def __init__( >>> fields = ["particle_position_x","particle_position_y", >>> "particle_position_z", >>> "particle_density","particle_temperature"] - >>> particles = WithDensityParticleGenerator(ds, sphere, num_particles, - >>> fields, density_field='Dark_Matter_Density') + >>> particles = WithDensityParticleGenerator( + ... ds, + ... sphere, + ... num_particles, + ... fields, + ... density_field='Dark_Matter_Density' + ... ) """ super(WithDensityParticleGenerator, self).__init__( diff --git a/yt/utilities/sdf.py b/yt/utilities/sdf.py index e9364173d05..d412ee3b892 100644 --- a/yt/utilities/sdf.py +++ b/yt/utilities/sdf.py @@ -897,8 +897,8 @@ def get_data(self, chunk, fields): return data def get_next_nonzero_chunk(self, key, stop=None): - # These next two while loops are to squeeze the keys if they are empty. Would be better - # to go through and set base equal to the last non-zero base, i think. + # These next two while loops are to squeeze the keys if they are empty. + # Would be better to go through and set base equal to the last non-zero base. if stop is None: stop = self._max_key while key < stop: @@ -910,8 +910,8 @@ def get_next_nonzero_chunk(self, key, stop=None): return key def get_previous_nonzero_chunk(self, key, stop=None): - # These next two while loops are to squeeze the keys if they are empty. Would be better - # to go through and set base equal to the last non-zero base, i think. + # These next two while loops are to squeeze the keys if they are empty. + # Would be better to go through and set base equal to the last non-zero base. if stop is None: stop = self.indexdata["index"][0] while key > stop: diff --git a/yt/visualization/fits_image.py b/yt/visualization/fits_image.py index 14e86420e62..480ea2dcadb 100644 --- a/yt/visualization/fits_image.py +++ b/yt/visualization/fits_image.py @@ -562,7 +562,7 @@ def info(self, output=None): if output is None: output = sys.stdout if num_cols == 8: - header = "No. Name Ver Type Cards Dimensions Format Units" + header = "No. Name Ver Type Cards Dimensions Format Units" # NOQA E501 format = "{:3d} {:10} {:3} {:11} {:5d} {} {} {}" else: header = ( diff --git a/yt/visualization/image_writer.py b/yt/visualization/image_writer.py index 1aa58ee18b9..7244c979775 100644 --- a/yt/visualization/image_writer.py +++ b/yt/visualization/image_writer.py @@ -168,7 +168,8 @@ def write_image(image, filename, color_bounds=None, cmap_name=None, func=lambda This function will scale an image and directly call libpng to write out a colormapped version of that image. It is designed for rapid-fire saving of - image buffers generated using `yt.visualization.api.FixedResolutionBuffers` and the like. + image buffers generated using `yt.visualization.api.FixedResolutionBuffers` + and the likes. Parameters ---------- @@ -212,7 +213,8 @@ def apply_colormap(image, color_bounds=None, cmap_name=None, func=lambda x: x): This function will scale an image and directly call libpng to write out a colormapped version of that image. 
It is designed for rapid-fire saving of - image buffers generated using `yt.visualization.api.FixedResolutionBuffers` and the like. + image buffers generated using `yt.visualization.api.FixedResolutionBuffers` + and the likes. Parameters ---------- diff --git a/yt/visualization/line_plot.py b/yt/visualization/line_plot.py index e57e7b8a3a2..921b11fe48f 100644 --- a/yt/visualization/line_plot.py +++ b/yt/visualization/line_plot.py @@ -232,11 +232,17 @@ def from_lines( Example -------- - >>> ds = yt.load('SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', step=-1) + >>> ds = yt.load( + >>> 'SecondOrderTris/RZ_p_no_parts_do_nothing_bcs_cone_out.e', + >>> step=-1 + >>> ) >>> fields = [field for field in ds.field_list if field[0] == 'all'] - >>> lines = [] - >>> lines.append(yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25')) - >>> lines.append(yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5')) + >>> lines = [ + ... yt.LineBuffer(ds, [0.25, 0, 0], [0.25, 1, 0], 100, label='x = 0.25'), + ... yt.LineBuffer(ds, [0.5, 0, 0], [0.5, 1, 0], 100, label='x = 0.5') + ... ] + >>> lines.append() + >>> plot = yt.LinePlot.from_lines(ds, fields, lines) >>> plot.save() diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index 4cd582aec5d..7afff832718 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -92,8 +92,9 @@ def newfunc(*args, **kwargs): def accepts_all_fields(f): - """Decorate a function whose second argument is and deal with the special case - field == 'all', looping over all fields already present in the PlotContainer instance. + """ + Decorate a function whose second argument is and deal with the special case + field == 'all', looping over all fields already present in the PlotContainer object. """ # This is to be applied to PlotContainer class methods with the following signature: @@ -271,7 +272,8 @@ def get_log(self, field): if field == 'all', applies to all plots. """ - # devnote : accepts_all_fields decorator is not applicable here because the return variable isn't self + # devnote : accepts_all_fields decorator is not applicable here because + # the return variable isn't self log = {} if field == "all": fields = list(self.plots.keys()) @@ -909,7 +911,8 @@ def _sanitize_units(z, _field): plot_units = self.frb[_field].units z = z.to(plot_units).value except AttributeError: - # only certain subclasses have a frb attribute they can rely on for inspecting units + # only certain subclasses have a frb attribute + # they can rely on for inspecting units mylog.warning( "%s class doesn't support zmin/zmax set as tuples or YTQuantity", self.__class__.__name__, @@ -943,7 +946,7 @@ def _sanitize_units(z, _field): def set_cbar_minorticks(self, field, state): """Deprecated alias, kept for backward compatibility. 
- turn colorbar minor ticks "on" or "off" in the current plot, according to *state* + turn colorbar minor ticks "on" or "off" in the current plot, follwoin *state* Parameters ---------- diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index 01efadc8cb8..0eb51d19d31 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -791,7 +791,7 @@ def __call__(self, plot): min_level = self.min_level or 0 max_level = self.max_level or levels.max() - # sorts the four arrays in order of ascending level - this makes images look nicer + # sort the four arrays in order of ascending level, this makes images look nicer new_indices = np.argsort(levels) levels = levels[new_indices] GLE = GLE[new_indices] @@ -1776,7 +1776,8 @@ class HaloCatalogCallback(PlotCallback): Parameters ---------- - halo_catalog : Dataset, DataContainer, or ~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog + halo_catalog : Dataset, DataContainer, + or ~yt.analysis_modules.halo_analysis.halo_catalog.HaloCatalog The object containing halos to be overplotted. This can be a HaloCatalog object, a loaded halo catalog dataset, or a data container from a halo catalog dataset. @@ -2349,13 +2350,10 @@ class TimestampCallback(PlotCallback): This string defines the coordinate system of the coordinates of pos Valid coordinates are: - "data" -- the 3D dataset coordinates - - "plot" -- the 2D coordinates defined by the actual plot limits - - "axis" -- the MPL axis coordinates: (0,0) is lower left; (1,1) is upper right - - "figure" -- the MPL figure coordinates: (0,0) is lower left, (1,1) is upper right + - "data": 3D dataset coordinates + - "plot": 2D coordinates defined by the actual plot limits + - "axis": MPL axis coordinates: (0,0) is lower left; (1,1) is upper right + - "figure": MPL figure coordinates: (0,0) is lower left, (1,1) is upper right time_offset : float, (value, unit) tuple, or YTQuantity, optional Apply an offset to the time shown in the annotation from the @@ -2583,13 +2581,10 @@ class ScaleCallback(PlotCallback): This string defines the coordinate system of the coordinates of pos Valid coordinates are: - "data" -- the 3D dataset coordinates - - "plot" -- the 2D coordinates defined by the actual plot limits - - "axis" -- the MPL axis coordinates: (0,0) is lower left; (1,1) is upper right - - "figure" -- the MPL figure coordinates: (0,0) is lower left, (1,1) is upper right + - "data": 3D dataset coordinates + - "plot": 2D coordinates defined by the actual plot limits + - "axis": MPL axis coordinates: (0,0) is lower left; (1,1) is upper right + - "figure": MPL figure coordinates: (0,0) is lower left, (1,1) is upper right text_args : dictionary, optional A dictionary of parameters to used to update the font_properties diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 6378f185a44..ceffd5ba666 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -153,7 +153,8 @@ class PlotWindow(ImagePlotContainer): Parameters ---------- - data_source : :class:`yt.data_objects.selection_data_containers.YTSelectionContainer2D` + data_source : + :class:`yt.data_objects.selection_data_containers.YTSelectionContainer2D` This is the source to be pixelized, which can be a projection, slice, or a cutting plane. bounds : sequence of floats @@ -457,20 +458,20 @@ def set_origin(self, origin): coordinate space can be given. If plain numeric types are input, units of `code_length` are assumed. 
Further examples: - =============================================== ================================== - format example - =============================================== ================================== - '{space}' 'domain' - '{xloc}-{space}' 'left-window' - '{yloc}-{space}' 'upper-domain' - '{yloc}-{xloc}-{space}' 'lower-right-window' - ('{space}',) ('window',) - ('{xloc}', '{space}') ('right', 'domain') - ('{yloc}', '{space}') ('lower', 'window') - ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') - ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0.5, 'm'), (0.4, 'm'), 'window') - (xloc, yloc, '{space}') (0.23, 0.5, 'domain') - =============================================== ================================== + =============================================== =============================== + format example + =============================================== =============================== + '{space}' 'domain' + '{xloc}-{space}' 'left-window' + '{yloc}-{space}' 'upper-domain' + '{yloc}-{xloc}-{space}' 'lower-right-window' + ('{space}',) ('window',) + ('{xloc}', '{space}') ('right', 'domain') + ('{yloc}', '{space}') ('lower', 'window') + ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') + ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window') + (xloc, yloc, '{space}') (0.23, 0.5, 'domain') + =============================================== =============================== """ self.origin = origin return self @@ -497,7 +498,8 @@ def set_mpl_projection(self, mpl_proj): if passed as a string, mpl_proj is the specified projection type, if passed as a tuple, then tuple will take the form of ``("ProjectionType", (args))`` or ``("ProjectionType", (args), {kwargs})`` - Valid projection type options include: 'PlateCarree', 'LambertConformal', 'LabmbertCylindrical', + Valid projection type options include: + 'PlateCarree', 'LambertConformal', 'LabmbertCylindrical', 'Mercator', 'Miller', 'Mollweide', 'Orthographic', 'Robinson', 'Stereographic', 'TransverseMercator', 'InterruptedGoodeHomolosine', 'RotatedPole', 'OGSB', @@ -1154,9 +1156,8 @@ def _setup_plots(self): else: mylog.error( - "Unable to draw cbar minorticks for field %s with transform %s ", - f, - self._field_transform[f], + "Unable to draw cbar minorticks for field " + "%s with transform %s ", f, self._field_transform[f] ) self._cbar_minorticks[f] = False @@ -1290,10 +1291,10 @@ def export_to_mpl_figure( cbar_pad="0%", ): r""" - Creates a matplotlib figure object with the specified axes arrangement, nrows_ncols, - and maps the underlying figures to the matplotlib axes. Note that all of these - parameters are fed directly to the matplotlib ImageGrid class to create the new figure - layout. + Creates a matplotlib figure object with the specified axes arrangement, + nrows_ncols, and maps the underlying figures to the matplotlib axes. + Note that all of these parameters are fed directly to the matplotlib ImageGrid + class to create the new figure layout. Parameters ---------- @@ -1430,20 +1431,20 @@ class AxisAlignedSlicePlot(PWViewerMPL): coordinate space can be given. If plain numeric types are input, units of `code_length` are assumed. 
Further examples: - =============================================== ================================== - format example - =============================================== ================================== - '{space}' 'domain' - '{xloc}-{space}' 'left-window' - '{yloc}-{space}' 'upper-domain' - '{yloc}-{xloc}-{space}' 'lower-right-window' - ('{space}',) ('window',) - ('{xloc}', '{space}') ('right', 'domain') - ('{yloc}', '{space}') ('lower', 'window') - ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') - ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0.5, 'm'), (0.4, 'm'), 'window') - (xloc, yloc, '{space}') (0.23, 0.5, 'domain') - =============================================== ================================== + =============================================== =============================== + format example + =============================================== =============================== + '{space}' 'domain' + '{xloc}-{space}' 'left-window' + '{yloc}-{space}' 'upper-domain' + '{yloc}-{xloc}-{space}' 'lower-right-window' + ('{space}',) ('window',) + ('{xloc}', '{space}') ('right', 'domain') + ('{yloc}', '{space}') ('lower', 'window') + ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') + ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window') + (xloc, yloc, '{space}') (0.23, 0.5, 'domain') + =============================================== =============================== axes_unit : string The name of the unit for the tick labels on the x and y axes. Defaults to None, which automatically picks an appropriate unit. @@ -1620,20 +1621,20 @@ class ProjectionPlot(PWViewerMPL): coordinate space can be given. If plain numeric types are input, units of `code_length` are assumed. Further examples: - =============================================== ================================== - format example - =============================================== ================================== - '{space}' 'domain' - '{xloc}-{space}' 'left-window' - '{yloc}-{space}' 'upper-domain' - '{yloc}-{xloc}-{space}' 'lower-right-window' - ('{space}',) ('window',) - ('{xloc}', '{space}') ('right', 'domain') - ('{yloc}', '{space}') ('lower', 'window') - ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') - ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0.5, 'm'), (0.4, 'm'), 'window') + =============================================== =============================== + format example + =============================================== =============================== + '{space}' 'domain' + '{xloc}-{space}' 'left-window' + '{yloc}-{space}' 'upper-domain' + '{yloc}-{xloc}-{space}' 'lower-right-window' + ('{space}',) ('window',) + ('{xloc}', '{space}') ('right', 'domain') + ('{yloc}', '{space}') ('lower', 'window') + ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') + ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window') (xloc, yloc, '{space}') (0.23, 0.5, 'domain') - =============================================== ================================== + =============================================== =============================== right_handed : boolean Whether the implicit east vector for the image generated is set to make a right @@ -1844,8 +1845,8 @@ class OffAxisSlicePlot(PWViewerMPL): set, an arbitrary grid-aligned north-vector is chosen. 
right_handed : boolean Whether the implicit east vector for the image generated is set to make a right - handed coordinate system with the north vector and the normal, the direction of the - 'window' into the data. + handed coordinate system with the north vector and the normal, the direction of + the 'window' into the data. fontsize : integer The size of the fonts for the axis, colorbar, and tick labels. field_parameters : dictionary @@ -2031,8 +2032,8 @@ class OffAxisProjectionPlot(PWViewerMPL): set, an arbitrary grid-aligned north-vector is chosen. right_handed : boolean Whether the implicit east vector for the image generated is set to make a right - handed coordinate system with the north vector and the normal, the direction of the - 'window' into the data. + handed coordinate system with the north vector and the normal, the direction of + the 'window' into the data. fontsize : integer The size of the fonts for the axis, colorbar, and tick labels. method : string @@ -2303,20 +2304,20 @@ def SlicePlot(ds, normal=None, fields=None, axis=None, *args, **kwargs): coordinate space can be given. If plain numeric types are input, units of `code_length` are assumed. Further examples: - =============================================== ================================== - format example - =============================================== ================================== - '{space}' 'domain' - '{xloc}-{space}' 'left-window' - '{yloc}-{space}' 'upper-domain' - '{yloc}-{xloc}-{space}' 'lower-right-window' - ('{space}',) ('window',) - ('{xloc}', '{space}') ('right', 'domain') - ('{yloc}', '{space}') ('lower', 'window') - ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') - ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0.5, 'm'), (0.4, 'm'), 'window') - (xloc, yloc, '{space}') (0.23, 0.5, 'domain') - =============================================== ================================== + =============================================== =============================== + format example + =============================================== =============================== + '{space}' 'domain' + '{xloc}-{space}' 'left-window' + '{yloc}-{space}' 'upper-domain' + '{yloc}-{xloc}-{space}' 'lower-right-window' + ('{space}',) ('window',) + ('{xloc}', '{space}') ('right', 'domain') + ('{yloc}', '{space}') ('lower', 'window') + ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') + ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window') + (xloc, yloc, '{space}') (0.23, 0.5, 'domain') + =============================================== =============================== north_vector : a sequence of floats A vector defining the 'up' direction in the `OffAxisSlicePlot`; not used in `AxisAlignedSlicePlot`. This option sets the orientation of the @@ -2470,20 +2471,20 @@ def plot_2d( coordinate space can be given. If plain numeric types are input, units of `code_length` are assumed. 
Further examples: - =============================================== ================================== - format example - =============================================== ================================== - '{space}' 'domain' - '{xloc}-{space}' 'left-window' - '{yloc}-{space}' 'upper-domain' - '{yloc}-{xloc}-{space}' 'lower-right-window' - ('{space}',) ('window',) - ('{xloc}', '{space}') ('right', 'domain') - ('{yloc}', '{space}') ('lower', 'window') - ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') - ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0.5, 'm'), (0.4, 'm'), 'window') - (xloc, yloc, '{space}') (0.23, 0.5, 'domain') - =============================================== ================================== + =============================================== =============================== + format example + =============================================== =============================== + '{space}' 'domain' + '{xloc}-{space}' 'left-window' + '{yloc}-{space}' 'upper-domain' + '{yloc}-{xloc}-{space}' 'lower-right-window' + ('{space}',) ('window',) + ('{xloc}', '{space}') ('right', 'domain') + ('{yloc}', '{space}') ('lower', 'window') + ('{yloc}', '{xloc}', '{space}') ('lower', 'right', 'window') + ((yloc, '{unit}'), (xloc, '{unit}'), '{space}') ((0, 'm'), (.4, 'm'), 'window') + (xloc, yloc, '{space}') (0.23, 0.5, 'domain') + =============================================== =============================== axes_unit : string The name of the unit for the tick labels on the x and y axes. Defaults to None, which automatically picks an appropriate unit. diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 17be01b0d2d..ac45d93a2d8 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -114,7 +114,8 @@ def off_axis_projection( if interpolated: raise NotImplementedError( - "Only interpolated=False methods are currently implemented for off-axis-projections" + "Only interpolated=False methods are currently implemented " + "for off-axis-projections" ) data_source = data_source_or_all(data_source) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 898399b3988..2ccd58549d8 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -424,9 +424,9 @@ def render(self, camera, zbuffer=None): Parameters ---------- - camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + camera: :class:`yt.visualization.volume_rendering.camera.Camera` A volume rendering camera. Can be any type of camera. - zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance + zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` A zbuffer array. This is used for opaque sources to determine the z position of the source relative to other sources. Only useful if you are manually calling render on multiple sources. Scene.render @@ -434,7 +434,7 @@ def render(self, camera, zbuffer=None): Returns ------- - A :class:`yt.data_objects.image_array.ImageArray` instance containing + A :class:`yt.data_objects.image_array.ImageArray` containing the rendered image. 
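        A minimal usage sketch (the sample dataset and the assumption that
        ``create_scene`` provides a usable default camera are illustrative,
        not prescribed by this method):

        >>> import yt
        >>> ds = yt.load_sample("IsolatedGalaxy")
        >>> sc = yt.create_scene(ds, field=("gas", "density"))
        >>> source = list(sc.sources.values())[0]
        >>> im = source.render(sc.camera)  # an ImageArray with the rendered image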
""" @@ -469,9 +469,9 @@ def finalize_image(self, camera, image): Parameters ---------- - camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + camera: :class:`yt.visualization.volume_rendering.camera.Camera` The camera used to produce the volume rendering image. - image: :class:`yt.data_objects.image_array.ImageArray` instance + image: :class:`yt.data_objects.image_array.ImageArray` A reference to an image to fill """ if self._volume is not None: @@ -673,9 +673,9 @@ def render(self, camera, zbuffer=None): Parameters ---------- - camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + camera: :class:`yt.visualization.volume_rendering.camera.Camera` A volume rendering camera. Can be any type of camera. - zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance + zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` A zbuffer array. This is used for opaque sources to determine the z position of the source relative to other sources. Only useful if you are manually calling render on multiple sources. Scene.render @@ -683,7 +683,7 @@ def render(self, camera, zbuffer=None): Returns ------- - A :class:`yt.data_objects.image_array.ImageArray` instance containing + A :class:`yt.data_objects.image_array.ImageArray` containing the rendered image. """ @@ -873,9 +873,9 @@ def render(self, camera, zbuffer=None): Parameters ---------- - camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + camera: :class:`yt.visualization.volume_rendering.camera.Camera` A volume rendering camera. Can be any type of camera. - zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance + zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` A zbuffer array. This is used for opaque sources to determine the z position of the source relative to other sources. Only useful if you are manually calling render on multiple sources. Scene.render @@ -883,7 +883,7 @@ def render(self, camera, zbuffer=None): Returns ------- - A :class:`yt.data_objects.image_array.ImageArray` instance containing + A :class:`yt.data_objects.image_array.ImageArray` containing the rendered image. """ @@ -995,17 +995,16 @@ def render(self, camera, zbuffer=None): Parameters ---------- - camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + camera: :class:`yt.visualization.volume_rendering.camera.Camera` A volume rendering camera. Can be any type of camera. - zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance - A zbuffer array. This is used for opaque sources to determine the + zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` z position of the source relative to other sources. Only useful if you are manually calling render on multiple sources. Scene.render uses this internally. Returns ------- - A :class:`yt.data_objects.image_array.ImageArray` instance containing + A :class:`yt.data_objects.image_array.ImageArray` containing the rendered image. """ @@ -1282,9 +1281,9 @@ def render(self, camera, zbuffer=None): Parameters ---------- - camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + camera: :class:`yt.visualization.volume_rendering.camera.Camera` A volume rendering camera. Can be any type of camera. - zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance + zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` A zbuffer array. 
This is used for opaque sources to determine the z position of the source relative to other sources. Only useful if you are manually calling render on multiple sources. Scene.render @@ -1292,7 +1291,7 @@ def render(self, camera, zbuffer=None): Returns ------- - A :class:`yt.data_objects.image_array.ImageArray` instance containing + A :class:`yt.data_objects.image_array.ImageArray` containing the rendered image. """ diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 06d7e132151..80440ba9035 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -121,7 +121,8 @@ def add_source(self, render_source, keyname=None): Parameters ---------- - render_source: :class:`yt.visualization.volume_rendering.render_source.RenderSource` + render_source: + :class:`yt.visualization.volume_rendering.render_source.RenderSource` A source to contribute to the volume rendering scene. keyname: string (optional) @@ -287,7 +288,7 @@ def save(self, fname=None, sigma_clip=None, render=True): >>> sc.save('test.png', sigma_clip=4) When saving multiple images without modifying the scene (camera, - sources,etc.), render=False can be used to avoid re-rendering when a scene is saved. + sources,etc.), render=False can be used to avoid re-rendering. This is useful for generating images at a range of sigma_clip values: >>> import yt diff --git a/yt/visualization/volume_rendering/shader_objects.py b/yt/visualization/volume_rendering/shader_objects.py index 72c9a86638c..96db435974b 100644 --- a/yt/visualization/volume_rendering/shader_objects.py +++ b/yt/visualization/volume_rendering/shader_objects.py @@ -24,11 +24,11 @@ class ShaderProgram: ---------- vertex_shader : string - or :class:`yt.visualization.volume_rendering.shader_objects.VertexShader` + or :class:`yt.visualization.volume_rendering.shader_objects.VertexShader` The vertex shader used in the Interactive Data Visualization pipeline. fragment_shader : string - or :class:`yt.visualization.volume_rendering.shader_objects.FragmentShader` + or :class:`yt.visualization.volume_rendering.shader_objects.FragmentShader` The fragment shader used in the Interactive Data Visualization pipeline. """ diff --git a/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py index 8de78d2d119..f6052c3c60f 100644 --- a/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py +++ b/yt/visualization/volume_rendering/tests/test_off_axis_SPH.py @@ -44,8 +44,8 @@ def test_no_rotation(): @requires_module("scipy") def test_basic_rotation_1(): """ All particles on Z-axis should now be on the negative Y-Axis - fake_sph_orientation has three z-axis particles, so there should be three y-axis particles - after rotation + fake_sph_orientation has three z-axis particles, + so there should be three y-axis particles after rotation (0, 0, 1) -> (0, -1) (0, 0, 2) -> (0, -2) (0, 0, 3) -> (0, -3) @@ -79,9 +79,9 @@ def test_basic_rotation_1(): @requires_module("scipy") def test_basic_rotation_2(): - """ Rotation of x-axis onto z-axis. All particles on z-axis should now be on the negative x-Axis - fake_sph_orientation has three z-axis particles, so there should be three x-axis particles - after rotation + """ Rotation of x-axis onto z-axis. 
+ All particles on z-axis should now be on the negative x-Axis fake_sph_orientation + has three z-axis particles, so there should be three x-axis particles after rotation (0, 0, 1) -> (-1, 0) (0, 0, 2) -> (-2, 0) (0, 0, 3) -> (-3, 0) @@ -118,9 +118,10 @@ def test_basic_rotation_2(): @requires_module("scipy") def test_basic_rotation_3(): - """ Rotation of z-axis onto negative z-axis. All fake particles on z-axis should now be on - the negative z-Axis. - fake_sph_orientation has three z-axis particles, so we should have a local maxima at (0, 0) + """Rotation of z-axis onto negative z-axis. + All fake particles on z-axis should now be of the negative z-Axis. + fake_sph_orientation has three z-axis particles, + so we should have a local maxima at (0, 0) (0, 0, 1) -> (0, 0) (0, 0, 2) -> (0, 0) (0, 0, 3) -> (0, 0) diff --git a/yt/visualization/volume_rendering/transfer_functions.py b/yt/visualization/volume_rendering/transfer_functions.py index af14b24b564..4c9288d575e 100644 --- a/yt/visualization/volume_rendering/transfer_functions.py +++ b/yt/visualization/volume_rendering/transfer_functions.py @@ -237,7 +237,8 @@ def clear(self): def __repr__(self): disp = ( - ": x_bounds:(%3.2g, %3.2g) nbins:%3.2g features:%s" + ": " + "x_bounds:(%3.2g, %3.2g) nbins:%3.2g features:%s" % (self.x_bounds[0], self.x_bounds[1], self.nbins, self.features) ) return disp From 4dc831f9bbf27d383b90f556f83e55705ca05772 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 7 Aug 2020 09:24:24 +0200 Subject: [PATCH 388/653] missing space in multiline string Co-authored-by: Corentin Cadiou --- yt/data_objects/selection_data_containers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index 3ac09205ad6..3e73f5a940b 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -1121,7 +1121,7 @@ def _cond_ind(self): locals = self.locals.copy() if "obj" in locals: raise RuntimeError( - "'obj' has been defined in the 'locals' ;" + "'obj' has been defined in the 'locals' ; " "this is not supported, please rename the variable." 
) locals["obj"] = obj From 9ea5d47c174ac00f3c2fe37b8c1b074e5c4e7ad0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 7 Aug 2020 09:25:33 +0200 Subject: [PATCH 389/653] remove empty line Co-authored-by: Corentin Cadiou --- yt/frontends/gamer/data_structures.py | 1 - 1 file changed, 1 deletion(-) diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index 101ecff4a54..bc8a882264a 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -188,7 +188,6 @@ def _validate_parent_children_relationship(self): % (grid.id, c.id, grid.RightEdge[d], c.RightEdge[d]) ) if not grid.LeftEdge[d] <= c.LeftEdge[d]: - raise ValueError(msgL) if not grid.RightEdge[d] >= c.RightEdge[d]: From e47b52dd94ec6e57a538b07ba20cb06d196aaa4c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 7 Aug 2020 09:25:57 +0200 Subject: [PATCH 390/653] fix a typo in docstring Co-authored-by: Corentin Cadiou --- yt/visualization/plot_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index 7afff832718..5db1e0c6618 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -946,7 +946,7 @@ def _sanitize_units(z, _field): def set_cbar_minorticks(self, field, state): """Deprecated alias, kept for backward compatibility. - turn colorbar minor ticks "on" or "off" in the current plot, follwoin *state* + turn colorbar minor ticks "on" or "off" in the current plot, following *state* Parameters ---------- From 098281500f090f228a71909d61d365b6101b59e3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 7 Aug 2020 09:27:25 +0200 Subject: [PATCH 391/653] fix logging syntax --- yt/frontends/amrvac/fields.py | 5 +++-- yt/frontends/open_pmd/fields.py | 4 +++- yt/visualization/plot_window.py | 4 +++- 3 files changed, 9 insertions(+), 4 deletions(-) diff --git a/yt/frontends/amrvac/fields.py b/yt/frontends/amrvac/fields.py index 6313ed8f902..3c0efacb850 100644 --- a/yt/frontends/amrvac/fields.py +++ b/yt/frontends/amrvac/fields.py @@ -43,7 +43,7 @@ def _velocity(field, data, idir, prefix=None): mylog.info( "zeros found in %sdensity, " "patching them to compute corresponding velocity field.", - prefix + prefix, ) mask2 = moment == 0 if not ((mask1 & mask2) == mask1).all(): @@ -126,7 +126,8 @@ def _setup_dust_fields(self): if idust > imax: mylog.error( "Only the first %d dust species are currently read by yt. " - "If you read this, please consider issuing a ticket. ", imax + "If you read this, please consider issuing a ticket. 
", + imax, ) break self._setup_velocity_fields(idust) diff --git a/yt/frontends/open_pmd/fields.py b/yt/frontends/open_pmd/fields.py index 2eb8edf89c9..c82c6d4b4e8 100644 --- a/yt/frontends/open_pmd/fields.py +++ b/yt/frontends/open_pmd/fields.py @@ -215,7 +215,9 @@ def __init__(self, ds, field_list): if recname != "particlePatches": mylog.info( "open_pmd - %s_%s does not seem to have " - "unitDimension", pname, recname + "unitDimension", + pname, + recname, ) for i in self.known_particle_fields: mylog.debug("open_pmd - known_particle_fields - %s", i) diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index ceffd5ba666..527a2f98905 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -1157,7 +1157,9 @@ def _setup_plots(self): else: mylog.error( "Unable to draw cbar minorticks for field " - "%s with transform %s ", f, self._field_transform[f] + "%s with transform %s ", + f, + self._field_transform[f], ) self._cbar_minorticks[f] = False From da86be6a1bd64dc83bbeb3e33b8c67eb86d80c02 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 08:44:46 +0200 Subject: [PATCH 392/653] fix E501 errors --- yt/frontends/fits/data_structures.py | 7 ++++--- yt/utilities/object_registries.py | 3 ++- yt/visualization/plot_container.py | 3 ++- 3 files changed, 8 insertions(+), 5 deletions(-) diff --git a/yt/frontends/fits/data_structures.py b/yt/frontends/fits/data_structures.py index b48a48803f5..12e48c712fb 100644 --- a/yt/frontends/fits/data_structures.py +++ b/yt/frontends/fits/data_structures.py @@ -174,13 +174,14 @@ def _detect_output_fields(self): mylog.info("Adding field %s to the list of fields.", fname) if units == "dimensionless": mylog.warning( - "Could not determine dimensions for field %s, setting to dimensionless.", + "Could not determine dimensions for field %s, " + "setting to dimensionless.", fname, ) else: mylog.warning( - "Image block %s does not have the same dimensions as the primary and will not be " - "available as a field.", + "Image block %s does not have the same dimensions " + "as the primary and will not be available as a field.", hdu.name.lower(), ) diff --git a/yt/utilities/object_registries.py b/yt/utilities/object_registries.py index 8c7018501e6..f80055849d5 100644 --- a/yt/utilities/object_registries.py +++ b/yt/utilities/object_registries.py @@ -1,4 +1,5 @@ -# These are some of the data object registries that are used in different places in the code. Not all of the self-registering objects are included in these. +# These are some of the data object registries that are used in different places in the +# code. Not all of the self-registering objects are included in these. 
analysis_task_registry = {} data_object_registry = {} diff --git a/yt/visualization/plot_container.py b/yt/visualization/plot_container.py index 5db1e0c6618..c9dd4e06472 100644 --- a/yt/visualization/plot_container.py +++ b/yt/visualization/plot_container.py @@ -914,7 +914,8 @@ def _sanitize_units(z, _field): # only certain subclasses have a frb attribute # they can rely on for inspecting units mylog.warning( - "%s class doesn't support zmin/zmax set as tuples or YTQuantity", + "%s class doesn't support zmin/zmax" + " as tuples or unyt_quantitiy", self.__class__.__name__, ) z = z.value From bf5dd5f95101671150fa8b94b94dac3fad578f81 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 10:26:46 +0200 Subject: [PATCH 393/653] update flynt version in precommit hook and lint requirements, use the new '--version' option --- .pre-commit-config.yaml | 2 +- .travis.yml | 2 +- tests/lint_requirements.txt | 2 +- 3 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 62efb8ce42d..61211f27778 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -12,6 +12,6 @@ hooks: - id: flake8 - repo: https://github.com/ikamensh/flynt - rev: '0.51' # keep in sync with tests/lint_requirements.txt + rev: '0.52' # keep in sync with tests/lint_requirements.txt hooks: - id: flynt diff --git a/.travis.yml b/.travis.yml index 9b55523c2df..f56a39376f4 100644 --- a/.travis.yml +++ b/.travis.yml @@ -103,7 +103,7 @@ jobs: name: "flynt" python: 3.6 script: | - flynt -h | head -n 1 # this is a workaround to print only the version number + flynt --version flynt yt/ --fail-on-change --dry-run -e yt/extern - stage: tests diff --git a/tests/lint_requirements.txt b/tests/lint_requirements.txt index 5025bebf56b..6f68aa66dda 100644 --- a/tests/lint_requirements.txt +++ b/tests/lint_requirements.txt @@ -5,4 +5,4 @@ pyflakes==2.2.0 isort==5.2.1 # keep in sync with .pre-commit-config.yaml black==19.10b0 flake8-bugbear -flynt==0.51 # keep in sync with .pre-commit-config.yaml +flynt==0.52 # keep in sync with .pre-commit-config.yaml From 0be2b6bad857c0ce384985a9ca42b03781b000d0 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 3 Feb 2020 09:14:07 +0000 Subject: [PATCH 394/653] Add support for vertex-centred data computation --- yt/data_objects/octree_subset.py | 54 ++++++++++++++++++++++++-- yt/frontends/ramses/data_structures.py | 6 +++ 2 files changed, 56 insertions(+), 4 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 78e1def558f..8f42b46f2ac 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -104,7 +104,7 @@ def mask_refinement(self, selector): def select_blocks(self, selector): mask = self.oct_handler.mask(selector, domain_id=self.domain_id) - slicer = OctreeSubsetBlockSlice(self) + slicer = OctreeSubsetBlockSlice(self, self.ds) for i, sl in slicer: yield sl, np.atleast_3d(mask[i, ...]) @@ -528,6 +528,7 @@ def __init__(self, ind, block_slice): self.block_slice = block_slice nz = self.block_slice.octree_subset.nz self.ActiveDimensions = np.array([nz, nz, nz], dtype="int64") + self.ds = block_slice.ds def __getitem__(self, key): bs = self.block_slice @@ -572,8 +573,10 @@ def dds(self): def clear_data(self): pass - def get_vertex_centered_data(self, *args, **kwargs): - raise NotImplementedError + def get_vertex_centered_data(self, fields, smoothed=False, no_ghost=False): + field = fields[0] + new_field = 
self.block_slice.get_vertex_centered_data(fields)[field] + return {field: new_field[..., self.ind]} @contextmanager def _field_parameter_state(self, field_parameters): @@ -581,13 +584,56 @@ def _field_parameter_state(self, field_parameters): class OctreeSubsetBlockSlice: - def __init__(self, octree_subset): + def __init__(self, octree_subset, ds): self.octree_subset = octree_subset + self.ds = ds + self._vertex_centered_data = {} # Cache some attributes for attr in ["ires", "icoords", "fcoords", "fwidth"]: v = getattr(octree_subset, attr) setattr(self, f"_{attr}", octree_subset._reshape_vals(v)) + @property + def octree_subset_with_gz(self): + subset_with_gz = getattr(self, "_octree_subset_with_gz", None) + if not subset_with_gz: + self._octree_subset_with_gz = self.octree_subset.retrieve_ghost_zones(1, []) + return self._octree_subset_with_gz + + def get_vertex_centered_data(self, fields, smoothed=False, no_ghost=False): + if no_ghost is True: + raise NotImplementedError( + "get_vertex_centered_data without ghost zones for oct-based datasets has not been implemented." + ) + + # Make sure the field list has only unique entries + fields = list(set(fields)) + new_fields = {} + cg = self.octree_subset_with_gz + for field in fields: + if field in self._vertex_centered_data: + new_fields[field] = self._vertex_centered_data[field] + else: + finfo = self.ds._get_field_info(field) + orig_field = cg[field] + nocts = orig_field.shape[-1] + new_field = np.zeros((3, 3, 3, nocts), order="F") + new_field += orig_field[1:, 1:, 1:] + new_field += orig_field[:-1, 1:, 1:] + new_field += orig_field[1:, :-1, 1:] + new_field += orig_field[1:, 1:, :-1] + new_field += orig_field[:-1, 1:, :-1] + new_field += orig_field[1:, :-1, :-1] + new_field += orig_field[:-1, :-1, 1:] + new_field += orig_field[:-1, :-1, :-1] + new_field *= 0.125 + + new_fields[field] = self.ds.arr(new_field, finfo.output_units) + + self._vertex_centered_data[field] = new_fields[field] + + return new_fields + def __iter__(self): for i in range(self._ires.shape[-1]): yield i, OctreeSubsetBlockSlicePosition(i, self) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 4be5ba45766..1fd5bf75c2d 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -349,10 +349,16 @@ def fill(self, fd, fields, selector, file_handler): ) def retrieve_ghost_zones(self, ngz, fields, smoothed=False): + if smoothed: + raise NotImplementedError + new_subset = RAMSESDomainSubset( self.base_region, self.domain, self.ds, num_ghost_zones=ngz, base_grid=self ) + # Cache the fields + new_subset.get_data(fields) + return new_subset From 1a4d07d9b0fe0945d730aff128ac47173ee0bc19 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 10 Aug 2020 08:53:59 +0200 Subject: [PATCH 395/653] Change error into warning --- yt/frontends/ramses/data_structures.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 1fd5bf75c2d..a5dd0198230 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -350,7 +350,12 @@ def fill(self, fd, fields, selector, file_handler): def retrieve_ghost_zones(self, ngz, fields, smoothed=False): if smoothed: - raise NotImplementedError + mylog.warning( + f"{self}.retrieve_ghost_zones was called with the " + f"`smoothed` argument set to True. This is not supported, " + "ignoring it." 
+ ) + smoothed = False new_subset = RAMSESDomainSubset( self.base_region, self.domain, self.ds, num_ghost_zones=ngz, base_grid=self From 010b4293eb198d2fe3ee036b2de72b5255a6d781 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 10 Aug 2020 09:00:04 +0200 Subject: [PATCH 396/653] Use retrieve_ghost_zone from #2610 --- yt/frontends/ramses/data_structures.py | 21 ++++++++++++++++----- 1 file changed, 16 insertions(+), 5 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a5dd0198230..79bc80fb95a 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -357,12 +357,23 @@ def retrieve_ghost_zones(self, ngz, fields, smoothed=False): ) smoothed = False - new_subset = RAMSESDomainSubset( - self.base_region, self.domain, self.ds, num_ghost_zones=ngz, base_grid=self - ) + try: + new_subset = self._subset_with_gz + mylog.debug( + "Reusing previous subset with ghost zone for domain %s", self.domain_id + ) + except AttributeError: + new_subset = RAMSESDomainSubset( + self.base_region, + self.domain, + self.ds, + num_ghost_zones=ngz, + base_grid=self, + ) + self._subset_with_gz = new_subset - # Cache the fields - new_subset.get_data(fields) + # Cache the fields + new_subset.get_data(fields) return new_subset From 04acdf8d13499e5cda6736b458955879e21663c6 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 10 Aug 2020 15:08:11 +0200 Subject: [PATCH 397/653] Include comment --- yt/data_objects/octree_subset.py | 14 ++++++-------- 1 file changed, 6 insertions(+), 8 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 8f42b46f2ac..58a9e6a7ac2 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -1,4 +1,5 @@ from contextlib import contextmanager +from itertools import product, repeat import numpy as np @@ -618,14 +619,11 @@ def get_vertex_centered_data(self, fields, smoothed=False, no_ghost=False): orig_field = cg[field] nocts = orig_field.shape[-1] new_field = np.zeros((3, 3, 3, nocts), order="F") - new_field += orig_field[1:, 1:, 1:] - new_field += orig_field[:-1, 1:, 1:] - new_field += orig_field[1:, :-1, 1:] - new_field += orig_field[1:, 1:, :-1] - new_field += orig_field[:-1, 1:, :-1] - new_field += orig_field[1:, :-1, :-1] - new_field += orig_field[:-1, :-1, 1:] - new_field += orig_field[:-1, :-1, :-1] + + # Compute vertex-centred data as mean of 8 neighbours cell data + slices = (slice(1, None), slice(None, -1)) + for slx, sly, slz in product(*repeat(slices, 3)): + new_field += orig_field[slx, sly, slz] new_field *= 0.125 new_fields[field] = self.ds.arr(new_field, finfo.output_units) From 9044335e8a0ffee75a0e5765138bd479992d6cf2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 10:42:55 +0200 Subject: [PATCH 398/653] [ci skip] tmp deactivate coverage badge --- README.md | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/README.md b/README.md index 9d5f2d50781..2a938da78e9 100644 --- a/README.md +++ b/README.md @@ -3,13 +3,15 @@ [![Users' Mailing List](https://img.shields.io/badge/Users-List-lightgrey.svg)](https://mail.python.org/archives/list/yt-users@python.org//) [![Devel Mailing List](https://img.shields.io/badge/Devel-List-lightgrey.svg)](https://mail.python.org/archives/list/yt-dev@python.org//) [![Build Status](https://img.shields.io/travis/yt-project/yt.svg?branch=master)](https://travis-ci.org/yt-project/yt) 
-[![codecov](https://codecov.io/gh/yt-project/yt/branch/master/graph/badge.svg)](https://codecov.io/gh/yt-project/yt) [![Latest Documentation](https://img.shields.io/badge/docs-latest-brightgreen.svg)](http://yt-project.org/docs/dev/) [![Data Hub](https://img.shields.io/badge/data-hub-orange.svg)](https://hub.yt/) [![Powered by NumFOCUS](https://img.shields.io/badge/powered%20by-NumFOCUS-orange.svg?style=flat&colorA=E1523D&colorB=007D8A)](http://numfocus.org) [![Sponsor our Project](https://img.shields.io/badge/donate-to%20yt-blueviolet)](https://numfocus.salsalabs.org/donate-to-yt/index.html) [![Code style: black](https://img.shields.io/badge/code%20style-black-000000.svg)](https://github.com/psf/black) + yt is an open-source, permissively-licensed python package for analyzing and From f4ebd73ce80545c482ba87573d82ce650c92ebea Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 26 May 2020 17:12:03 +0100 Subject: [PATCH 399/653] Add octree raytracing C++ code --- yt/utilities/lib/octree_raytracing.cpp | 601 +++++++++++++++++++++++++ 1 file changed, 601 insertions(+) create mode 100644 yt/utilities/lib/octree_raytracing.cpp diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/octree_raytracing.cpp new file mode 100644 index 00000000000..42357013faf --- /dev/null +++ b/yt/utilities/lib/octree_raytracing.cpp @@ -0,0 +1,601 @@ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + + +typedef double F; + +void print_with_prefix(std::string s, int count, char c='\t') { + for (auto i = 0; i < count; ++i) std::cout << c; + std::cout << s << std::endl; +} + +const bool debug = false; +const bool debug_ray = false; + +/* A simple node struct that contains a key and a fixed number of children, + typically Nchildren = 2**Ndim + */ +template +struct GenericNode +{ + using _Node = struct GenericNode; + + // Tree data + _Node** children = nullptr; + _Node* parent = nullptr; + + // Node data + keyType key; + int level = 0; + bool terminal = false; + int index = -1; +}; + +template +struct RayInfo { + std::vector keys; + std::vector t; + + RayInfo() {}; + RayInfo(int N) { + if (N > 0) { + keys.reserve(N); + t.reserve(2*N); + } + } +}; + +template +struct Ray { + std::array o; // Origin + std::array d; // Direction + F tmin = -1e99; + F tmax = 1e99; + + Ray(const std::array _o, const std::array _d, const F _tmin, const F _tmax) : o(_o), tmin(_tmin), tmax(_tmax) { + F dd = 0; + for (auto idim = 0; idim < Ndim; ++idim) { + dd += _d[idim] * _d[idim]; + } + dd = std::sqrt(dd); + for (auto idim = 0; idim < Ndim; ++idim) { + d[idim] = _d[idim] / dd; + } + }; + + Ray(const F* _o, const F* _d, const F _tmin, const F _tmax) : tmin(_tmin), tmax(_tmax) { + for (auto idim = 0; idim < Ndim; ++idim) { + o[idim] = _o[idim]; + } + F dd = 0; + for (auto idim = 0; idim < Ndim; ++idim) { + dd += _d[idim] * _d[idim]; + } + dd = std::sqrt(dd); + for (auto idim = 0; idim < Ndim; ++idim) { + d[idim] = _d[idim] / dd; + } + }; + + Ray() {}; +}; + +/* Converts an array of integer position into a flattened index. + The fast varying index is the last one. + */ +template +inline unsigned char ijk2iflat(const std::array ijk) { + unsigned char iflat = 0; + for (auto i : ijk) { + iflat += i; + iflat <<= 1; + } + return iflat >> 1; +}; + +/* Converts a flattened index into an array of integer position. + The fast varying index is the last one. 
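   As a worked illustration of the convention (assuming Ndim = 3, with
   dimension 0 ending up in the most significant bit):

       ijk = {1, 0, 1}    ->  iflat = 0b101 = 5    (ijk2iflat)
       iflat = 6 = 0b110  ->  ijk   = {1, 1, 0}    (iflat2ijk)

   so the two helpers are exact inverses of each other.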
+*/ +template +inline std::array iflat2ijk(unsigned char iflat) { + std::array ijk; + for (auto idim = Ndim-1; idim >= 0; --idim) { + ijk[idim] = iflat & 0b1; + iflat >>= 1; + } + return ijk; +}; + +/* A class to build an octree and cast rays through it. */ +template +class Octree { + using Node = struct GenericNode; + using keyVector = std::vector; + using Pos = std::array; + using iPos = std::array; + using ucPos = std::array; + +private: + const unsigned char twotondim; + const int maxDepth; + Pos size; + Pos DLE; // Domain left edge + Pos DRE; // Domain right edge + Node* root; + int global_index = 0; + +public: + Octree(int _maxDepth, F* _size) : + twotondim (1<children = (Node**) malloc(sizeof(Node*)*twotondim); + for (auto i = 0; i < twotondim; ++i) root->children = nullptr; + + DLE.fill(0); + DRE = size; + } + + ~Octree() { + recursive_remove_node(root); + }; + + /* + Insert a new node in the tree. + */ + Node* insert_node(const iPos ipos, const int lvl, keyType key) { + assert(lvl <= maxDepth); + + // std::cerr << "Inserting at level: " << lvl << "/" << maxDepth << std::endl; + // this is 0b100..., where the 1 is at position maxDepth + uint mask = 1<<(maxDepth - 1); + + iPos ijk = ipos; + std::array bitMask; + + Node* node = root; + Node* child = nullptr; + + if (debug )std::cerr << "Creating tree at position " << ipos[0] << " " << ipos[1] << " " << ipos[2] << ", level=" << lvl << "/" << maxDepth << std::endl; + + // Go down the tree + for (auto ibit = maxDepth-1; ibit >= maxDepth - lvl; --ibit) { + if (debug) std::cerr << "\tbit=" << ibit << std::endl; + // Find children based on bits + if (debug) std::cerr << "\t\tbitmask: "; + for (auto idim = 0; idim < Ndim; ++idim) { + bitMask[idim] = ijk[idim] & mask; + if (debug) std::cerr << bitMask[idim]; + } + if (debug) std::cerr << std::endl; + mask >>= 1; + auto iflat = ijk2iflat(bitMask); + if (debug) std::cerr << "\t→ node[" << node->index << "].children[" << (int)iflat << "]" << std::endl; + + // Create child if it does not exist yet + child = create_get_node(node, iflat); + node = child; + } + + // Mark last node as terminal + node->terminal = true; + node->key = key; + + return node; + } + + Node* insert_node(const int* ipos, const int lvl, keyType key) { + std::array ipos_as_arr; + for (auto idim = 0; idim < Ndim; ++idim) ipos_as_arr[idim] = ipos[idim]; + return insert_node(ipos_as_arr, lvl, key); + } + + void insert_node_no_ret(const int* ipos, const int lvl, keyType key) { + Node* n = insert_node(ipos, lvl, key); + if (debug) { + std::cout << "Inserted node at position " << ipos[0] << " " << ipos[1] << " " << ipos[2]; + std::cout << " with key " << n->key << " and index " << n->index << std::endl; + } + } + + // Perform multiple ray cast + RayInfo** cast_rays(const F *origins, const F *directions, const int Nrays) { + // std::vector> *ray_infos = mallocnew std::vector>(Nrays); + RayInfo **ray_infos = (RayInfo**)malloc(sizeof(RayInfo*)*Nrays); + int Nfound = 0; + #pragma omp parallel for + for (auto i = 0; i < Nrays; ++i) { + std::vector tList; + ray_infos[i] = new RayInfo(Nfound); + auto ri = ray_infos[i]; + Ray r(&origins[3*i], &directions[3*i], -1e99, 1e99); + cast_ray(&r, ri->keys, ri->t); + if (debug || debug_ray) { + std::cout << "Length of kv: " << ri->keys.size() << std::endl; + for (auto v: ri->keys) std::cout << v << " "; + std::cout << std::endl; + } + Nfound = std::max(Nfound, (int) ri->keys.size()); + } + return ray_infos; + } + + // Perform single ray tracing + void cast_ray(Ray *r, keyVector &keyList, std::vector 
&tList) { + if (debug || debug_ray) { + std::cout << "Entering cast_ray | o=" << + r->o[0] << " " << r->o[1] << " " << r->o[2] << " | d = " << + r->d[0] << " " << r->d[1] << " " << r->d[2] << std::endl; + } + // Boolean mask for direction + unsigned char a = 0; + unsigned char bmask = twotondim >> 1; + + // Put ray in positive direction and store info in bitmask "a" + for (auto idim = 0; idim < Ndim; ++idim) { + if (r->d[idim] < 0.0) { + r->o[idim] = size[idim]-r->o[idim]; + r->d[idim] = -r->d[idim]; + a |= bmask; + } + bmask >>= 1; + } + + // Compute intersection points + Pos t0, t1; + for (auto idim = 0; idim < Ndim; ++idim){ + t0[idim] = (DLE[idim] - r->o[idim]) / r->d[idim]; + t1[idim] = (DRE[idim] - r->o[idim]) / r->d[idim]; + } + + // If entry point is smaller than exit point, find path in octree + if (*std::max_element(t0.begin(), t0.end()) < *std::min_element(t1.begin(), t1.end())) + proc_subtree(t0[0], t0[1], t0[2], + t1[0], t1[1], t1[2], + root, a, keyList, tList); + if (debug || debug_ray) std::cout << "Leaving cast_ray" << std::endl; + } + +private: + + /* + Upsert a node as a child of another. + + This will create a new node as a child of the current one, or return + an existing one if it already exists + */ + Node* create_get_node(Node* parent, int iflat) { + // Create children if not already existing + if (parent->children == nullptr) { + if (debug) std::cerr << "Allocating children for node " << parent->index << std::endl; + parent->children = (Node**) malloc(sizeof(Node*)*twotondim); + for (auto i = 0; i < twotondim; ++i) parent->children[i] = nullptr; + } + + if (parent->children[iflat] == nullptr) { + if (debug) std::cerr << "Creating node[" << parent->index << "].children[" << iflat << "]" << std::endl; + Node* node = new Node(); + node->level = parent->level + 1; + node->index = global_index; + node->parent = parent; + ++global_index; + + parent->children[iflat] = node; + } + return parent->children[iflat]; + } + + /* + Recursively free memory. 
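+     Each child's subtree is freed before the child pointer itself, and the array of child pointers is released last.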
+ */ + void recursive_remove_node(Node* node) { + if (node->children) { + for (auto i = 0; i < twotondim; ++i) { + auto child = node->children[i]; + if (child) { + recursive_remove_node(child); + } + free(child); + } + free(node->children); + } + } + + /* + Traverse the tree, assuming that the ray intersects + From http://wscg.zcu.cz/wscg2000/Papers_2000/X31.pdf + */ + void proc_subtree(const F tx0, const F ty0, const F tz0, + const F tx1, const F ty1, const F tz1, + const Node *n, const unsigned char a, + keyVector &keyList, std::vector &tList, int lvl=0) { + if ((debug || debug_ray) && n) print_with_prefix("Entering proc_subtree in node " + std::to_string(n->index), lvl); + // Check if exit face is not in our back + if (tx1 < 0 || ty1 < 0 || tz1 < 0) { + if ((debug || debug_ray)) print_with_prefix("Leaving because tx1|ty1|tz1<0", lvl); + return; + } + + // Exit if the node is null (happens if it hasn't been added to the tree) + if (!n) { + if ((debug || debug_ray)) print_with_prefix("Leaving because node is null", lvl); + return; + } + + // Process leaf node + if (n->terminal) { + if (debug || debug_ray) print_with_prefix("Inserting node in keyList (index=" + std::to_string(n->key) + ")", lvl); + keyList.push_back(n->key); + // Push entry & exit t + tList.push_back(std::max(std::max(tx0, ty0), tz0)); + tList.push_back(std::min(std::min(tx1, ty1), tz1)); + assert(n->children == nullptr); + return; + } + + // Early break for leafs without children + if (n->children == nullptr) { + if ((debug || debug_ray)) print_with_prefix("Leaving because no children", lvl); + return; + } + + // Compute middle intersection + F txm, tym, tzm; + txm = (tx0 + tx1) * 0.5; + tym = (ty0 + ty1) * 0.5; + tzm = (tz0 + tz1) * 0.5; + + unsigned char iNode = first_node(tx0, ty0, tz0, txm, tym, tzm); + if (debug || debug_ray) print_with_prefix("First node: " + std::to_string(int(iNode)), lvl); + + // Iterate over children + do { + + switch (iNode) + { + case 0: + proc_subtree(tx0, ty0, tz0, txm, tym, tzm, n->children[a], a, keyList, tList, lvl+1); + if (debug || debug_ray) { + // do something + } + iNode = next_node(txm, tym, tzm, 4, 2, 1); + if (debug || debug_ray) print_with_prefix("From 0 to " + std::to_string(iNode), lvl); + break; + case 1: + proc_subtree(tx0, ty0, tzm, txm, tym, tz1, n->children[1^a], a, keyList, tList, lvl+1); + iNode = next_node(txm, tym, tz1, 5, 3, 8); + if (debug || debug_ray) print_with_prefix("From 1 to " + std::to_string(iNode), lvl); + break; + case 2: + proc_subtree(tx0, tym, tz0, txm, ty1, tzm, n->children[2^a], a, keyList, tList, lvl+1); + iNode = next_node(txm, ty1, tzm, 6, 8, 3); + if (debug || debug_ray) print_with_prefix("From 2 to " + std::to_string(iNode), lvl); + break; + case 3: + proc_subtree(tx0, tym, tzm, txm, ty1, tz1, n->children[3^a], a, keyList, tList, lvl+1); + iNode = next_node(txm, ty1, tz1, 7, 8, 8); + if (debug || debug_ray) print_with_prefix("From 3 to " + std::to_string(iNode), lvl); + break; + case 4: + proc_subtree(txm, ty0, tz0, tx1, tym, tzm, n->children[4^a], a, keyList, tList, lvl+1); + iNode = next_node(tx1, tym, tzm, 8, 6, 5); + if (debug || debug_ray) print_with_prefix("From 4 to " + std::to_string(iNode), lvl); + break; + case 5: + proc_subtree(txm, ty0, tzm, tx1, tym, tz1, n->children[5^a], a, keyList, tList, lvl+1); + iNode = next_node(tx1, tym, tz1, 8, 7, 8); + if (debug || debug_ray) print_with_prefix("From 5 to " + std::to_string(iNode), lvl); + break; + case 6: + proc_subtree(txm, tym, tz0, tx1, ty1, tzm, n->children[6^a], a, keyList, tList, 
lvl+1); + iNode = next_node(tx1, ty1, tzm, 8, 8, 7); + if (debug || debug_ray) print_with_prefix("From 6 to " + std::to_string(iNode), lvl); + break; + case 7: + proc_subtree(txm, tym, tzm, tx1, ty1, tz1, n->children[7^a], a, keyList, tList, lvl+1); + iNode = 8; + if (debug || debug_ray) print_with_prefix("From 7 to " + std::to_string(iNode), lvl); + break; + } + } while (iNode < twotondim); + + if (debug || debug_ray) print_with_prefix("Leaving proc_subtree", lvl); + } + + // From "An Efficient Parametric Algorithm for Octree Traversal" by Revelles, Urena, & Lastra + inline unsigned char first_node(const F tx0, const F ty0, const F tz0, + const F txm, const F tym, const F tzm) { + unsigned char index = 0; + if (tx0 >= std::max(ty0, tz0)) { // enters YZ plane + if (tym < tx0) index |= 0b010; + if (tzm < tx0) index |= 0b001; + } else if (ty0 >= std::max(tx0, tz0)) { // enters XZ plane + if (txm < ty0) index |= 0b100; + if (tzm < ty0) index |= 0b001; + } else { // enters XY plane + if (txm < tz0) index |= 0b100; + if (tym < tz0) index |= 0b010; + } + return index; + } + // From "An Efficient Parametric Algorithm for Octree Traversal" by Revelles, Urena, & Lastra + inline unsigned char next_node(const F tx, const F ty, const F tz, + const u_char ix, const u_char iy, const u_char iz) { + if(tx < std::min(ty, tz)) { // YZ plane + return ix; + } else if (ty < std::min(tx, tz)) { // XZ plane + return iy; + } else { // XY plane + return iz; + } + } +}; + + +// Define some instances for easy use in Python +typedef Ray<3> Ray3D; +typedef RayInfo Ray3DInt; +typedef RayInfo Ray3DLong; +template +using Octree3D = Octree; + +// Instantiate stuff +template class Octree; + +void test1() { + + // std::array bitMask = {true, false, true}; + + for (unsigned char i = 0; i < 8; i++){ + // auto tmp = iflat2ijk<3>(i); + // std::cout << (int)i << " -> " << tmp[0] << tmp[1] << tmp[2] << " -> " << (int)ijk2iflat<3>(iflat2ijk<3>(i)) << std::endl; + assert(ijk2iflat<3>(iflat2ijk<3>(i)) == i); + } + +} + +void test2() { + // Shouldnt crash + int index = 0; + int N = 4; + F size[3] = {1, 1, 1}; + Octree, 3> o(N, size); + for (auto i = 0; i < 1<({i, j, k})); + ++index; + } + } + } +} + +void test3(){ + int N = 4; + F size[3] = {1, 1, 1}; + Octree, 3> o(N, size); + F ox, oy, oz; + F rx, ry, rz; + // std::cin >> ox >> oy >> oz; + // std::cin >> rx >> ry >> rz; + + ox = 0.01; + oy = 0.84; + oz = 0.95; + + rx = 1.; + ry = -1.2; + rz = -1.5; + + F oo[3]; + oo[0] = ox; + oo[1] = oy; + oo[2] = oz; + F rr[3]; + rr[0] = rx; + rr[1] = ry; + rr[2] = rz; + Ray<3> r(oo, rr, -1e99, 1e99); + std::cerr<< "Casting ray in direction:\t" << rx << ", " << ry << ", " << rz << "(len=" << std::sqrt(r.d[0]*r.d[0] + r.d[1]*r.d[1] + r.d[2]*r.d[2]) << ")" <> ret; + std::vector tList; + // o.cast_ray(&r, ret, tList); +} + + +void test4() { + int N = 6; + F size[3] = {1, 1, 1}; + Octree, 3> o(N, size); + + // Filling half of octree at level 3 + for (auto i = 0; i < 1<> pos(1024*1024); + std::vector> dir(1024*1024); + + std::mt19937 gen(16091992); + auto dis = std::uniform_real_distribution<> (0., 1.); + for (auto i = 0; i < (int) pos.size(); ++i) { + for (auto idim = 0; idim < 3; ++idim) { + pos[i][idim] = dis(gen); + dir[i][idim] = dis(gen)*2-1; + } + } + // auto ret = o.cast_rays(pos, dir, (int) pos.size()); + // for (auto k: ret) { + // std::cout << k[0] << " " << k[1] << " " << k[2] << std::endl; + // } +} + +void test5() { + std::ifstream inFile; + + inFile.open("/tmp/ipos.txt"); + int ix, iy, iz; + std::vector> ipos; + while (inFile >> ix >> 
iy >> iz) { + ipos.push_back({ix, iy, iz}); + } + + inFile.close(); + + inFile.open("/tmp/lvl.txt"); + std::vector ilvl; + while (inFile >> ix) { + ilvl.push_back(ix); + } + + // Create octree + double size[3] = {1, 1, 1}; + Octree3D oct(16, size); + for (auto i = 0; i < (int) ipos.size(); ++i) { + oct.insert_node(&ipos[i][0], ilvl[i], i); + } + + // Now cast a ray + double o[3] = {0.5, 0.5, 0.5}; + double d[3] = {1., 2., 3.}; + Ray3D r(o, d, -1e99, 1e99); + std::vector keyList; + std::vector tList; + oct.cast_ray(&r, keyList, tList); +} + +int main() { + // std::cout << "########################## TEST 1 ##########################" << std::endl; + // test1(); + // std::cout << "########################## TEST 2 ##########################" << std::endl; + // test2(); + // std::cout << "########################## TEST 3 ##########################" << std::endl; + // test3(); + // std::cout << "########################## TEST 4 ##########################" << std::endl; + // test4(); + std::cout << "########################## TEST 5 ##########################" << std::endl; + test5(); + return 0; +} From d4c3c9ae08304b7e13a1fcfa9a5c6fcb4ea1bcce Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 26 May 2020 17:33:58 +0100 Subject: [PATCH 400/653] Base stuff for cython/c++ interaction --- yt/utilities/lib/cyoctree_raytracing.pyx | 93 ++++++++++++++++++++++++ yt/utilities/lib/octree_raytracing.cpp | 16 ++++ 2 files changed, 109 insertions(+) create mode 100644 yt/utilities/lib/cyoctree_raytracing.pyx diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx new file mode 100644 index 00000000000..2de2efb5467 --- /dev/null +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -0,0 +1,93 @@ +"""This is a wrapper around the C++ class to efficiently cast rays into an octree. +It relies on the seminal paper by J. Revelles,, C.Ureña and M.Lastra. 
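+In short: the tree is filled with ``add_nodes`` and queried with ``cast_rays``, which returns, for each ray, the keys of the intersected cells together with their entry and exit t-values.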
+""" + + +cimport numpy as np +import numpy as np +from libcpp.vector cimport vector +cimport cython +from libc.stdlib cimport free + +cdef extern from "octree_raytracing.cpp": + cdef cppclass RayInfo[T]: + vector[T] keys + vector[double] t + + cdef cppclass Octree3D[T]: + Octree3D(int depth, double* size) + Octree3D(int depth, double* LE, double* RE) + void insert_node_no_ret(const int* ipos, const int lvl, T key) + RayInfo[T]** cast_rays(const double* origins, const double* directions, const int Nrays) + +cdef class OctreeRayTracing: + cdef Octree3D[int]* oct + cdef int depth + + def __init__(self, data_source): + cdef double* LE = [0, 0, 0] + cdef double* RE = [1, 1, 1] + cdef int depth = data_source.ds.parameters['levelmax'] + self.oct = new Octree3D[int](depth, LE, RE) + + self.depth = depth + + @cython.boundscheck(False) + @cython.wraparound(False) + def add_nodes(self, int[:, :] ipos_view, int[:] lvl_view, int[:] key): + cdef int i + cdef int ii[3] + + for i in range(len(key)): + ii[0] = ipos_view[i, 0] + ii[1] = ipos_view[i, 1] + ii[2] = ipos_view[i, 2] + self.oct.insert_node_no_ret(ii, lvl_view[i], key[i]) + + @cython.boundscheck(False) + @cython.wraparound(False) + def cast_rays(self, double[:, ::1] o, double[:, ::1] d): + cdef RayInfo[int]** ret + cdef int Nrays = len(o) + cdef RayInfo[int]* ri + + if Nrays == 0: + return + + # print('Casting rays') + + ret = self.oct.cast_rays(&o[0,0], &d[0,0], Nrays) + + # print('cast!') + # Now pack all the rays in numpy arrays + cdef int[:] key_view + cdef int* key_ptr + + cdef double[:] t_view + cdef double* t_ptr + + # print('Taking ownership of data') + + key_array, t_array = [], [] + for i in range(Nrays): + ri = ret[i] + if ri.keys.size() == 0: + key_array.append(np.array([], dtype=int)) + t_array.append(np.array([], dtype=np.float64)) + else: + key_ptr = &ri.keys[0] + key_view = key_ptr + key_array.append(np.asarray(key_view)) + + t_ptr = &ri.t[0] + t_view = t_ptr + t_array.append(np.asarray(t_view).reshape(-1, 2)) + + free(ret[i]) + + # We can now free the *list* of vectors, note that the memory is now managed by numpy! 
+ free(ret) + return key_array, t_array + + def __dealloc__(self): + del self.oct \ No newline at end of file diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/octree_raytracing.cpp index 42357013faf..743f3c63623 100644 --- a/yt/utilities/lib/octree_raytracing.cpp +++ b/yt/utilities/lib/octree_raytracing.cpp @@ -147,6 +147,22 @@ class Octree { DRE = size; } + Octree(int _maxDepth, F* _DLE, F* _DRE) : + twotondim (1<children = (Node**) malloc(sizeof(Node*)*twotondim); + for (auto i = 0; i < twotondim; ++i) root->children = nullptr; + } + + ~Octree() { recursive_remove_node(root); }; From 5a075c0909663fbcf486f43c2b4fbd7ae8a4a964 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 26 May 2020 18:55:53 +0100 Subject: [PATCH 401/653] Wire things up --- yt/utilities/lib/cyoctree_raytracing.pyx | 14 +- yt/utilities/lib/pyoctree_raytracing.py | 63 +++++++ .../volume_rendering/render_source.py | 163 +++++++++++++++--- 3 files changed, 208 insertions(+), 32 deletions(-) create mode 100644 yt/utilities/lib/pyoctree_raytracing.py diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx index 2de2efb5467..b7bbce66672 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -20,16 +20,14 @@ cdef extern from "octree_raytracing.cpp": void insert_node_no_ret(const int* ipos, const int lvl, T key) RayInfo[T]** cast_rays(const double* origins, const double* directions, const int Nrays) -cdef class OctreeRayTracing: +cdef class CythonOctreeRayTracing: cdef Octree3D[int]* oct cdef int depth - def __init__(self, data_source): - cdef double* LE = [0, 0, 0] - cdef double* RE = [1, 1, 1] - cdef int depth = data_source.ds.parameters['levelmax'] - self.oct = new Octree3D[int](depth, LE, RE) - + def __init__(self, np.ndarray LE, np.ndarray RE, int depth): + cdef double* LE_ptr = LE.data + cdef double* RE_ptr = RE.data + self.oct = new Octree3D[int](depth, LE_ptr, RE_ptr) self.depth = depth @cython.boundscheck(False) @@ -43,7 +41,7 @@ cdef class OctreeRayTracing: ii[1] = ipos_view[i, 1] ii[2] = ipos_view[i, 2] self.oct.insert_node_no_ret(ii, lvl_view[i], key[i]) - + @cython.boundscheck(False) @cython.wraparound(False) def cast_rays(self, double[:, ::1] o, double[:, ::1] d): diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py new file mode 100644 index 00000000000..4b7c17a81f0 --- /dev/null +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -0,0 +1,63 @@ +from yt.funcs import iterable + +from yt.utilities.lib.cyoctree_raytracing import CythonOctreeRayTracing +from yt.utilities.amr_kdtree.amr_kdtree import _apply_log +import numpy as np + +import operator + + + +class OctreeRayTracing(object): + octree = None + data_source = None + log_fields = None + fields = None + + def __init__(self, data_source): + self.data_source = data_source + LE = np.array([0, 0, 0], dtype=np.float64) + RE = np.array([1, 1, 1], dtype=np.float64) + depth = data_source.ds.parameters['levelmax'] + + self.octree = CythonOctreeRayTracing(LE, RE, depth) + ds = data_source.ds + + xyz = np.stack([data_source[_].to('unitary').value for _ in 'x y z'.split()], axis=-1) + lvl = data_source['grid_level'].astype(int).value + ds.parameters['levelmin'] + + ipos = np.floor(xyz * (1<<(ds.parameters['levelmax']))).astype(int) + self.octree.add_nodes(ipos.astype(np.int32), lvl.astype(np.int32), np.arange(len(ipos), dtype=np.int32)) + + def set_fields(self, fields, log_fields, no_ghost, force=False): + if 
no_ghost: + raise NotImplementedError('Cannot use no ghost with Octree datasets') + new_fields = self.data_source._determine_fields(fields) + regenerate_data = self.fields is None or \ + len(self.fields) != len(new_fields) or \ + self.fields != new_fields or force + if not iterable(log_fields): + log_fields = [log_fields] + new_log_fields = list(log_fields) + self.fields = new_fields + + if self.log_fields is not None and not regenerate_data: + flip_log = list(map(operator.ne, self.log_fields, new_log_fields)) + else: + flip_log = [False] * len(new_log_fields) + self.log_fields = new_log_fields + + # TODO: cache data in the 3x3x3 neighbouring cells + + def cast_rays(self, vp_pos, vp_dir): + # TODO: cache indices of cells + self.cell_index, self.tvalues = \ + self.octree.cast_rays(vp_pos, vp_dir) + + def sample(self, sampler): + # TODO: Apply to sampler to each oct encountered by all rays. + pass + + def traverse(self, viewpoint): + raise Exception() + self.octree.cast_rays() \ No newline at end of file diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 898399b3988..57395fd53b5 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -5,9 +5,12 @@ from yt.config import ytcfg from yt.data_objects.image_array import ImageArray from yt.funcs import ensure_numpy_array, iterable, mylog +from yt.geometry.grid_geometry_handler import GridIndex +from yt.geometry.oct_geometry_handler import OctreeIndex from yt.utilities.amr_kdtree.api import AMRKDTree from yt.utilities.lib.bounding_volume_hierarchy import BVH from yt.utilities.lib.misc_utilities import zlines, zpoints +from yt.utilities.lib.pyoctree_raytracing import OctreeRayTracing from yt.utilities.on_demand_imports import NotAModule from yt.utilities.parallel_tools.parallel_analysis_interface import ( ParallelAnalysisInterface, @@ -114,6 +117,13 @@ def set_zbuffer(self, zbuffer): self.zbuffer = zbuffer +def create_volume_source(data_source, field): + index_class = data_source.ds.index.__class__ + if issubclass(index_class, GridIndex): + return KDTreeVolumeSource(data_source, field) + elif issubclass(index_class, OctreeIndex): + return OctreeVolumeSource(data_source, field) + class VolumeSource(RenderSource): """A class for rendering data from a volumetric data source @@ -151,7 +161,7 @@ class VolumeSource(RenderSource): >>> from yt.visualization.volume_rendering.api import Scene, VolumeSource, Camera >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> sc = Scene() - >>> source = VolumeSource(ds.all_data(), 'density') + >>> source = create_volume_source(ds.all_data(), 'density') >>> sc.add_source(source) >>> sc.add_camera() >>> im = sc.render() @@ -234,12 +244,7 @@ def volume(self): This object does the heavy lifting to access data in an efficient manner using a KDTree """ - if self._volume is None: - mylog.info("Creating volume") - volume = AMRKDTree(self.data_source.ds, data_source=self.data_source) - self._volume = volume - - return self._volume + return self._get_volume() @volume.setter def volume(self, value): @@ -418,6 +423,68 @@ def set_sampler(self, camera, interpolated=True): self.sampler = sampler assert self.sampler is not None + @validate_volume + def render(self, camera, zbuffer=None): + """Renders an image using the provided camera + + Parameters + ---------- + camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + A volume rendering camera. 
Can be any type of camera. + zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance + A zbuffer array. This is used for opaque sources to determine the + z position of the source relative to other sources. Only useful if + you are manually calling render on multiple sources. Scene.render + uses this internally. + + Returns + ------- + A :class:`yt.data_objects.image_array.ImageArray` instance containing + the rendered image. + + """ + raise NotImplementedError() + + def finalize_image(self, camera, image): + """Parallel reduce the image. + + Parameters + ---------- + camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance + The camera used to produce the volume rendering image. + image: :class:`yt.data_objects.image_array.ImageArray` instance + A reference to an image to fill + """ + if self._volume is not None: + image = self.volume.reduce_tree_images(image, camera.lens.viewpoint) + image.shape = camera.resolution[0], camera.resolution[1], 4 + # If the call is from VR, the image is rotated by 180 to get correct + # up direction + if self.transfer_function.grey_opacity is False: + image[:, :, 3] = 1 + return image + + def __repr__(self): + disp = ":%s " % str(self.data_source) + disp += "transfer_function:%s" % str(self._transfer_function) + return disp + + +class KDTreeVolumeSource(VolumeSource): + def _get_volume(self): + """The abstract volume associated with this VolumeSource + + This object does the heavy lifting to access data in an efficient manner + using a KDTree + """ + + if self._volume is None: + mylog.info("Creating volume") + volume = AMRKDTree(self.data_source.ds, data_source=self.data_source) + self._volume = volume + + return self._volume + @validate_volume def render(self, camera, zbuffer=None): """Renders an image using the provided camera @@ -464,30 +531,78 @@ def render(self, camera, zbuffer=None): return self.current_image - def finalize_image(self, camera, image): - """Parallel reduce the image. + +class OctreeVolumeSource(VolumeSource): + def __init__(self, *args, **kwa): + super(OctreeVolumeSource, self).__init__(*args, **kwa) + self.set_use_ghost_zones(True) + + def _get_volume(self): + """The abstract volume associated with this VolumeSource + + This object does the heavy lifting to access data in an efficient manner + using an octree. + """ + + if self._volume is None: + mylog.info("Creating volume") + volume = OctreeRayTracing(self.data_source) + self._volume = volume + + return self._volume + + @validate_volume + def render(self, camera, zbuffer=None): + """Renders an image using the provided camera Parameters ---------- camera: :class:`yt.visualization.volume_rendering.camera.Camera` instance - The camera used to produce the volume rendering image. - image: :class:`yt.data_objects.image_array.ImageArray` instance - A reference to an image to fill + A volume rendering camera. Can be any type of camera. + zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer` instance + A zbuffer array. This is used for opaque sources to determine the + z position of the source relative to other sources. Only useful if + you are manually calling render on multiple sources. Scene.render + uses this internally. + + Returns + ------- + A :class:`yt.data_objects.image_array.ImageArray` instance containing + the rendered image. 
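+            (Rays are first cast through the octree via ``self.volume.cast_rays`` and the sampler is then applied with ``self.volume.sample``; see the method body below.)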
+ """ - if self._volume is not None: - image = self.volume.reduce_tree_images(image, camera.lens.viewpoint) - image.shape = camera.resolution[0], camera.resolution[1], 4 - # If the call is from VR, the image is rotated by 180 to get correct - # up direction - if not self.transfer_function.grey_opacity: - image[:, :, 3] = 1 - return image + self.zbuffer = zbuffer + self.set_sampler(camera) + assert (self.sampler is not None) - def __repr__(self): - disp = f":{str(self.data_source)} " - disp += f"transfer_function:{str(self._transfer_function)}" - return disp + mylog.debug("Casting rays") + if self.check_nans: + for brick in self.volume.bricks: + for data in brick.my_data: + if np.any(np.isnan(data)): + raise RuntimeError + + sampler_params = camera._get_sampler_params(self) + vp_pos = sampler_params['vp_pos'].to('unitary').value.reshape(-1, 3) + vp_dir = sampler_params['vp_dir'].value.reshape(-1, 3).copy() + + self.volume.cast_rays(vp_pos, vp_dir) + mylog.debug("Done casting rays") + + mylog.debug("Sample rays") + self.volume.sample(self.sampler) + mylog.debug("Done sampling") + + self.current_image = self.finalize_image( + camera, self.sampler.aimage) + + if zbuffer is None: + self.zbuffer = ZBuffer( + self.current_image, + np.full(self.current_image.shape[:2], np.inf)) + + return self.current_image class MeshSource(OpaqueSource): """A source for unstructured mesh data. From 084d12614da835ab6a482b5453daccbf1bb492de Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 26 May 2020 18:56:19 +0100 Subject: [PATCH 402/653] VolumeSource is for internal API, use "create_volume_source" instead This adds the possibility to dynamically decide whether to use the KDtree-backed implementation, or the octree-backed one. --- doc/source/cookbook/render_two_fields.py | 6 +++--- doc/source/cookbook/various_lens.py | 2 +- .../volume_rendering/off_axis_projection.py | 2 +- yt/visualization/volume_rendering/scene.py | 2 +- .../volume_rendering/tests/test_composite.py | 2 +- .../volume_rendering/tests/test_lenses.py | 12 ++++++------ .../volume_rendering/tests/test_points.py | 2 +- .../volume_rendering/tests/test_scene.py | 2 +- .../volume_rendering/tests/test_varia.py | 2 +- .../volume_rendering/tests/test_vr_orientation.py | 2 +- .../volume_rendering/tests/test_zbuff.py | 2 +- .../volume_rendering/volume_rendering.py | 4 ++-- 12 files changed, 20 insertions(+), 20 deletions(-) diff --git a/doc/source/cookbook/render_two_fields.py b/doc/source/cookbook/render_two_fields.py index 2aec3672c95..385e59ad74e 100644 --- a/doc/source/cookbook/render_two_fields.py +++ b/doc/source/cookbook/render_two_fields.py @@ -1,5 +1,5 @@ import yt -from yt.visualization.volume_rendering.api import Scene, VolumeSource +from yt.visualization.volume_rendering.api import Scene, create_volume_source filePath = "Sedov_3d/sedov_hdf5_chk_0003" ds = yt.load(filePath) @@ -15,13 +15,13 @@ cam.switch_orientation() # add rendering of density field -dens = VolumeSource(ds, field="dens") +dens = create_volume_source(ds, field="dens") dens.use_ghost_zones = True sc.add_source(dens) sc.save("density.png", sigma_clip=6) # add rendering of x-velocity field -vel = VolumeSource(ds, field="velx") +vel = create_volume_source(ds, field="velx") vel.use_ghost_zones = True sc.add_source(vel) sc.save("density_any_velocity.png", sigma_clip=6) diff --git a/doc/source/cookbook/various_lens.py b/doc/source/cookbook/various_lens.py index 54ade80ff12..96e03428370 100644 --- a/doc/source/cookbook/various_lens.py +++ b/doc/source/cookbook/various_lens.py @@ 
-14,7 +14,7 @@ # Follow the simple_volume_rendering cookbook for the first part of this. ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030") sc = Scene() -vol = VolumeSource(ds, field=field) +vol = create_volume_source(ds, field=field) tf = vol.transfer_function tf.grey_opacity = True diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 17be01b0d2d..6f43bda01c0 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -303,7 +303,7 @@ def off_axis_projection( funits = data_source.ds._get_field_info(item).units - vol = VolumeSource(data_source, item) + vol = create_volume_source(data_source, item) if weight is None: vol.set_field(item) else: diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 06d7e132151..8847eef2b69 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -53,7 +53,7 @@ class Scene: >>> from yt.visualization.volume_rendering.api import Scene, VolumeSource, Camera >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> sc = Scene() - >>> source = VolumeSource(ds.all_data(), 'density') + >>> source = create_volume_source(ds.all_data(), 'density') >>> sc.add_source(source) >>> cam = sc.add_camera() >>> im = sc.render() diff --git a/yt/visualization/volume_rendering/tests/test_composite.py b/yt/visualization/volume_rendering/tests/test_composite.py index ff60099a54b..baa3cc58d2c 100644 --- a/yt/visualization/volume_rendering/tests/test_composite.py +++ b/yt/visualization/volume_rendering/tests/test_composite.py @@ -49,7 +49,7 @@ def test_composite_vr(self): sc = Scene() cam = sc.add_camera(ds) cam.resolution = (512, 512) - vr = VolumeSource(dd, field=ds.field_list[0]) + vr = create_volume_source(dd, field=ds.field_list[0]) vr.transfer_function.clear() vr.transfer_function.grey_opacity = True vr.transfer_function.map_to_colormap(0.0, 1.0, scale=3.0, colormap="Reds") diff --git a/yt/visualization/volume_rendering/tests/test_lenses.py b/yt/visualization/volume_rendering/tests/test_lenses.py index f8f51f1ef06..a72dc6d2d4b 100644 --- a/yt/visualization/volume_rendering/tests/test_lenses.py +++ b/yt/visualization/volume_rendering/tests/test_lenses.py @@ -42,7 +42,7 @@ def test_perspective_lens(self): sc = Scene() cam = sc.add_camera(self.ds, lens_type="perspective") cam.position = self.ds.arr(np.array([1.0, 1.0, 1.0]), "code_length") - vol = VolumeSource(self.ds, field=self.field) + vol = create_volume_source(self.ds, field=self.field) tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) @@ -53,7 +53,7 @@ def test_stereoperspective_lens(self): cam = sc.add_camera(self.ds, lens_type="stereo-perspective") cam.resolution = [256, 128] cam.position = self.ds.arr(np.array([0.7, 0.7, 0.7]), "code_length") - vol = VolumeSource(self.ds, field=self.field) + vol = create_volume_source(self.ds, field=self.field) tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) @@ -67,7 +67,7 @@ def test_fisheye_lens(self): cam.set_width(self.ds.domain_width) v, c = self.ds.find_max("density") cam.set_position(c - 0.0005 * self.ds.domain_width) - vol = VolumeSource(dd, field=self.field) + vol = create_volume_source(dd, field=self.field) tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) @@ -79,7 +79,7 @@ def test_plane_lens(self): cam = sc.add_camera(dd, lens_type="plane-parallel") 
cam.set_width(self.ds.domain_width * 1e-2) v, c = self.ds.find_max("density") - vol = VolumeSource(dd, field=self.field) + vol = create_volume_source(dd, field=self.field) tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) @@ -90,7 +90,7 @@ def test_spherical_lens(self): cam = sc.add_camera(self.ds, lens_type="spherical") cam.resolution = [256, 128] cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), "code_length") - vol = VolumeSource(self.ds, field=self.field) + vol = create_volume_source(self.ds, field=self.field) tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) @@ -103,7 +103,7 @@ def test_stereospherical_lens(self): cam = sc.add_camera(self.ds, lens_type="stereo-spherical") cam.resolution = [256, 256] cam.position = self.ds.arr(np.array([0.6, 0.5, 0.5]), "code_length") - vol = VolumeSource(self.ds, field=self.field) + vol = create_volume_source(self.ds, field=self.field) tf = vol.transfer_function tf.grey_opacity = True sc.add_source(vol) diff --git a/yt/visualization/volume_rendering/tests/test_points.py b/yt/visualization/volume_rendering/tests/test_points.py index 70db3158648..4901be7ab79 100644 --- a/yt/visualization/volume_rendering/tests/test_points.py +++ b/yt/visualization/volume_rendering/tests/test_points.py @@ -43,7 +43,7 @@ def test_points_vr(self): sc = Scene() cam = sc.add_camera(ds) cam.resolution = (512, 512) - vr = VolumeSource(dd, field=ds.field_list[0]) + vr = create_volume_source(dd, field=ds.field_list[0]) vr.transfer_function.clear() vr.transfer_function.grey_opacity = False vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10.0, colormap="Reds") diff --git a/yt/visualization/volume_rendering/tests/test_scene.py b/yt/visualization/volume_rendering/tests/test_scene.py index 14fad7ed38b..c2754b45299 100644 --- a/yt/visualization/volume_rendering/tests/test_scene.py +++ b/yt/visualization/volume_rendering/tests/test_scene.py @@ -58,7 +58,7 @@ def test_rotation(self): ma_bound = ((ma - mi) * (0.90)) + mi tf.map_to_colormap(mi_bound, ma_bound, scale=0.01, colormap="Blues_r") - vol2 = VolumeSource(dd2, field=("gas", "density")) + vol2 = create_volume_source(dd2, field=("gas", "density")) sc.add_source(vol2) tf = vol2.transfer_function diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index 74bc1adbdd1..ca4e33c11c6 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -71,7 +71,7 @@ def test_simple_volume_rendering(self): def test_lazy_volume_source_construction(self): sc = Scene() - source = VolumeSource(self.ds.all_data(), "density") + source = create_volume_source(self.ds.all_data(), "density") assert source._volume is None assert source._transfer_function is None diff --git a/yt/visualization/volume_rendering/tests/test_vr_orientation.py b/yt/visualization/volume_rendering/tests/test_vr_orientation.py index dd6a69a6723..93a2319b908 100644 --- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py +++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py @@ -20,7 +20,7 @@ def test_orientation(): sc = Scene() - vol = VolumeSource(ds, field=("gas", "density")) + vol = create_volume_source(ds, field=("gas", "density")) sc.add_source(vol) tf = vol.transfer_function diff --git a/yt/visualization/volume_rendering/tests/test_zbuff.py b/yt/visualization/volume_rendering/tests/test_zbuff.py index e86f84ec5eb..496a99000eb 100644 --- 
a/yt/visualization/volume_rendering/tests/test_zbuff.py +++ b/yt/visualization/volume_rendering/tests/test_zbuff.py @@ -48,7 +48,7 @@ def test_composite_vr(self): sc = Scene() cam = sc.add_camera(ds) cam.resolution = (512, 512) - vr = VolumeSource(dd, field=ds.field_list[0]) + vr = create_volume_source(dd, field=ds.field_list[0]) vr.transfer_function.clear() vr.transfer_function.grey_opacity = True vr.transfer_function.map_to_colormap(0.0, 1.0, scale=10.0, colormap="Reds") diff --git a/yt/visualization/volume_rendering/volume_rendering.py b/yt/visualization/volume_rendering/volume_rendering.py index f583942b5e5..99fe4de2b28 100644 --- a/yt/visualization/volume_rendering/volume_rendering.py +++ b/yt/visualization/volume_rendering/volume_rendering.py @@ -1,7 +1,7 @@ from yt.funcs import mylog from yt.utilities.exceptions import YTSceneFieldNotFound -from .render_source import MeshSource, VolumeSource +from .render_source import MeshSource, VolumeSource, create_volume_source from .scene import Scene from .utils import data_source_or_all @@ -63,7 +63,7 @@ def create_scene(data_source, field=None, lens_type="plane-parallel"): if hasattr(data_source.ds.index, "meshes"): source = MeshSource(data_source, field=field) else: - source = VolumeSource(data_source, field=field) + source = create_volume_source(data_source, field=field) sc.add_source(source) sc.add_camera(data_source=data_source, lens_type=lens_type) From 3e5d55f22cc77aca9e94716a9103696488030f16 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 26 May 2020 21:30:35 +0100 Subject: [PATCH 403/653] Simplifying python side --- yt/utilities/lib/pyoctree_raytracing.py | 31 +++++++++++++++++++------ 1 file changed, 24 insertions(+), 7 deletions(-) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 4b7c17a81f0..1587eac0aac 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -26,7 +26,7 @@ def __init__(self, data_source): xyz = np.stack([data_source[_].to('unitary').value for _ in 'x y z'.split()], axis=-1) lvl = data_source['grid_level'].astype(int).value + ds.parameters['levelmin'] - ipos = np.floor(xyz * (1<<(ds.parameters['levelmax']))).astype(int) + ipos = np.floor(xyz * (1<<(ds.parameters['levelmax']))).astype(int) self.octree.add_nodes(ipos.astype(np.int32), lvl.astype(np.int32), np.arange(len(ipos), dtype=np.int32)) def set_fields(self, fields, log_fields, no_ghost, force=False): @@ -50,13 +50,30 @@ def set_fields(self, fields, log_fields, no_ghost, force=False): # TODO: cache data in the 3x3x3 neighbouring cells def cast_rays(self, vp_pos, vp_dir): - # TODO: cache indices of cells - self.cell_index, self.tvalues = \ - self.octree.cast_rays(vp_pos, vp_dir) + """Cast the rays through the oct. - def sample(self, sampler): - # TODO: Apply to sampler to each oct encountered by all rays. - pass + Parameters + ---------- + vp_pos, vp_dir : float arrays (Nrays, Ndim) + The position (unitary) and direction of each ray + + Returns + ------- + cell_index : list of integer arrays of shape (Ncell) + For each ray, contains an ordered array of cell ids + that it intersects with + tvalues : list of float arrays of shape (Ncell, 2) + The t value at entry and exit for each cell. 
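+            (Ray directions are normalised when the underlying C++ Ray is constructed, so these t values are path lengths in the same unitary units as vp_pos.)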
+ """ + if not self._cell_index: + # TODO: cache indices of cells + self._cell_index, self._tvalues = \ + self.octree.cast_rays(vp_pos, vp_dir) + return self._cell_index, self._tvalues + + # def sample(self, sampler): + # # TODO: Apply to sampler to each oct encountered by all rays. + # self.octree.sample(sampler, self._cell_index, self._tvalues) def traverse(self, viewpoint): raise Exception() From ae1dcf14a62a4c2eaaff0da94ed7efde16c6cbd1 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 26 May 2020 21:31:02 +0100 Subject: [PATCH 404/653] Cast ray also calls the image sampler (need tidying though) --- yt/utilities/lib/cyoctree_raytracing.pyx | 71 +++++++++++++++--------- 1 file changed, 45 insertions(+), 26 deletions(-) diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx index b7bbce66672..d9775703584 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -7,7 +7,11 @@ cimport numpy as np import numpy as np from libcpp.vector cimport vector cimport cython -from libc.stdlib cimport free +from cython.parallel import prange, parallel +from libc.stdlib cimport free, malloc + +from .image_samplers cimport ImageSampler, ImageAccumulator +from .volume_container cimport VolumeContainer cdef extern from "octree_raytracing.cpp": cdef cppclass RayInfo[T]: @@ -44,7 +48,7 @@ cdef class CythonOctreeRayTracing: @cython.boundscheck(False) @cython.wraparound(False) - def cast_rays(self, double[:, ::1] o, double[:, ::1] d): + def cast_rays(self, double[:, ::1] o, double[:, ::1] d, ImageSampler sampler, int num_threads = 0): cdef RayInfo[int]** ret cdef int Nrays = len(o) cdef RayInfo[int]* ri @@ -56,36 +60,51 @@ cdef class CythonOctreeRayTracing: ret = self.oct.cast_rays(&o[0,0], &d[0,0], Nrays) - # print('cast!') - # Now pack all the rays in numpy arrays - cdef int[:] key_view + cdef int* cell_ind + cdef double* tval + + cdef int i, j, vi, vj, nx, ny + cdef VolumeContainer *vc + cdef ImageAccumulator *idata cdef int* key_ptr - - cdef double[:] t_view cdef double* t_ptr - - # print('Taking ownership of data') - - key_array, t_array = [], [] - for i in range(Nrays): - ri = ret[i] - if ri.keys.size() == 0: - key_array.append(np.array([], dtype=int)) - t_array.append(np.array([], dtype=np.float64)) - else: + cdef int[3] index = [1, 1, 1] + + nx = np.round(Nrays**0.5) + ny = nx # TODO: change this + + with nogil, parallel(num_threads=num_threads): + idata = malloc(sizeof(ImageAccumulator)) + vc = malloc(sizeof(VolumeContainer)) + for j in prange(Nrays, schedule='static'): + vj = j % ny + vi = (j - vj) / ny + ri = ret[j] + if ri.keys.size() == 0: + continue + + for i in range(3): # TODO: change 3 to Nchannel + idata.rgba[i] = 0 + key_ptr = &ri.keys[0] - key_view = key_ptr - key_array.append(np.asarray(key_view)) - t_ptr = &ri.t[0] - t_view = t_ptr - t_array.append(np.asarray(t_view).reshape(-1, 2)) - + + # Iterate over cells + for i in range(ri.keys.size()): + # Now call the sampler on the list of cells + sampler.sample( + vc, + &o[j, 0], + &d[j, 0], + t_ptr[2*i ], + t_ptr[2*i+1], + index, + idata + ) + # Free memory + for i in range(Nrays): free(ret[i]) - - # We can now free the *list* of vectors, note that the memory is now managed by numpy! 
free(ret) - return key_array, t_array def __dealloc__(self): del self.oct \ No newline at end of file From de2782aa7cd89d0b6f54ce2aaf77f03d7ff7e7d0 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 27 May 2020 14:03:22 +0100 Subject: [PATCH 405/653] Move to pxd file --- yt/utilities/lib/cyoctree_raytracing.pxd | 31 ++++++++++ yt/utilities/lib/cyoctree_raytracing.pyx | 77 +++++++++++++++--------- 2 files changed, 80 insertions(+), 28 deletions(-) create mode 100644 yt/utilities/lib/cyoctree_raytracing.pxd diff --git a/yt/utilities/lib/cyoctree_raytracing.pxd b/yt/utilities/lib/cyoctree_raytracing.pxd new file mode 100644 index 00000000000..410aa3ba35e --- /dev/null +++ b/yt/utilities/lib/cyoctree_raytracing.pxd @@ -0,0 +1,31 @@ +"""This is a wrapper around the C++ class to efficiently cast rays into an octree. +It relies on the seminal paper by J. Revelles,, C.Ureña and M.Lastra. +""" + + +cimport numpy as np +import numpy as np +from libcpp.vector cimport vector +cimport cython +from cython.parallel import prange, parallel +from libc.stdlib cimport free, malloc + +from .image_samplers cimport ImageAccumulator, ImageSampler +from .grid_traversal cimport sampler_function +from .volume_container cimport VolumeContainer +from .partitioned_grid cimport PartitionedGrid + +cdef extern from "octree_raytracing.cpp": + cdef cppclass RayInfo[T]: + vector[T] keys + vector[double] t + + cdef cppclass Octree3D[T]: + Octree3D(int depth, double* size) + Octree3D(int depth, double* LE, double* RE) + void insert_node_no_ret(const int* ipos, const int lvl, T key) + RayInfo[T]** cast_rays(const double* origins, const double* directions, const int Nrays) + +cdef class CythonOctreeRayTracing: + cdef Octree3D[int]* oct + cdef int depth diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx index d9775703584..c8b4f4fe308 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -10,36 +10,27 @@ cimport cython from cython.parallel import prange, parallel from libc.stdlib cimport free, malloc -from .image_samplers cimport ImageSampler, ImageAccumulator +from .image_samplers cimport ImageAccumulator, ImageSampler +from .grid_traversal cimport sampler_function from .volume_container cimport VolumeContainer +from .partitioned_grid cimport PartitionedGrid + +DEF Nch = 4 -cdef extern from "octree_raytracing.cpp": - cdef cppclass RayInfo[T]: - vector[T] keys - vector[double] t - - cdef cppclass Octree3D[T]: - Octree3D(int depth, double* size) - Octree3D(int depth, double* LE, double* RE) - void insert_node_no_ret(const int* ipos, const int lvl, T key) - RayInfo[T]** cast_rays(const double* origins, const double* directions, const int Nrays) - -cdef class CythonOctreeRayTracing: - cdef Octree3D[int]* oct - cdef int depth +cdef class CythonOctreeRayTracing: def __init__(self, np.ndarray LE, np.ndarray RE, int depth): cdef double* LE_ptr = LE.data cdef double* RE_ptr = RE.data self.oct = new Octree3D[int](depth, LE_ptr, RE_ptr) self.depth = depth - + @cython.boundscheck(False) @cython.wraparound(False) def add_nodes(self, int[:, :] ipos_view, int[:] lvl_view, int[:] key): cdef int i cdef int ii[3] - + for i in range(len(key)): ii[0] = ipos_view[i, 0] ii[1] = ipos_view[i, 1] @@ -47,35 +38,48 @@ cdef class CythonOctreeRayTracing: self.oct.insert_node_no_ret(ii, lvl_view[i], key[i]) @cython.boundscheck(False) - @cython.wraparound(False) - def cast_rays(self, double[:, ::1] o, double[:, ::1] d, ImageSampler sampler, int num_threads = 
0): + @cython.wraparound(False) + def cast_rays(self, double[:, ::1] o, double[:, ::1] d, ImageSampler sampler, PartitionedGrid pg, int num_threads = 0): cdef RayInfo[int]** ret cdef int Nrays = len(o) cdef RayInfo[int]* ri - + + cdef sampler_function* sample = sampler.sample + if Nrays == 0: return - + # print('Casting rays') - + ret = self.oct.cast_rays(&o[0,0], &d[0,0], Nrays) cdef int* cell_ind cdef double* tval - cdef int i, j, vi, vj, nx, ny + cdef int i, j, k, vi, vj, nx, ny, icell cdef VolumeContainer *vc cdef ImageAccumulator *idata cdef int* key_ptr cdef double* t_ptr - cdef int[3] index = [1, 1, 1] + cdef int[3] index = [0, 0, 0] nx = np.round(Nrays**0.5) ny = nx # TODO: change this + cdef int n_fields = pg.container.n_fields + with nogil, parallel(num_threads=num_threads): idata = malloc(sizeof(ImageAccumulator)) vc = malloc(sizeof(VolumeContainer)) + vc.n_fields = 1 + vc.data = malloc(sizeof(np.float64_t*)) + vc.mask = malloc(8*sizeof(np.uint8_t)) + # The actual dimensions are 2x2x2, but the sampler + # assumes vertex-centred data for a 1x1x1 lattice (i.e. + # 2^3 vertices) + vc.dims[0] = 1 + vc.dims[1] = 1 + vc.dims[2] = 1 for j in prange(Nrays, schedule='static'): vj = j % ny vi = (j - vj) / ny @@ -83,17 +87,28 @@ cdef class CythonOctreeRayTracing: if ri.keys.size() == 0: continue - for i in range(3): # TODO: change 3 to Nchannel + for i in range(Nch): idata.rgba[i] = 0 - + for i in range(8): + vc.mask[i] = 1 key_ptr = &ri.keys[0] t_ptr = &ri.t[0] # Iterate over cells for i in range(ri.keys.size()): + icell = key_ptr[i] + for k in range(n_fields): + vc.data[k] = &pg.container.data[k][8*icell] + + # Fill the volume container + for k in range(3): + vc.left_edge[k] = pg.container.left_edge[3*icell+k] + vc.right_edge[k] = pg.container.right_edge[3*icell+k] + vc.dds[i] = (vc.right_edge[i] - vc.left_edge[i]) + vc.idds[i] = 1/vc.dds[i] # Now call the sampler on the list of cells - sampler.sample( - vc, + sample( + vc, &o[j, 0], &d[j, 0], t_ptr[2*i ], @@ -101,6 +116,12 @@ cdef class CythonOctreeRayTracing: index, idata ) + + for i in range(Nch): + idata.rgba[i] = 0 + free(vc.data) + free(vc) + free(idata) # Free memory for i in range(Nrays): free(ret[i]) From d76258ecc6e5df1ad05f2c835de9c31605f7eb7a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 27 May 2020 14:03:45 +0100 Subject: [PATCH 406/653] Use c++ class for fixed interpolator --- yt/utilities/lib/fixed_interpolator.c | 443 ------------------ yt/utilities/lib/fixed_interpolator.cpp | 150 ++++++ ..._interpolator.h => fixed_interpolator.hpp} | 0 yt/utilities/lib/fixed_interpolator.pxd | 2 +- 4 files changed, 151 insertions(+), 444 deletions(-) delete mode 100644 yt/utilities/lib/fixed_interpolator.c create mode 100644 yt/utilities/lib/fixed_interpolator.cpp rename yt/utilities/lib/{fixed_interpolator.h => fixed_interpolator.hpp} (100%) diff --git a/yt/utilities/lib/fixed_interpolator.c b/yt/utilities/lib/fixed_interpolator.c deleted file mode 100644 index 93fb21d0330..00000000000 --- a/yt/utilities/lib/fixed_interpolator.c +++ /dev/null @@ -1,443 +0,0 @@ -/******************************************************************************* -*******************************************************************************/ - -// -// A small, tiny, itty bitty module for computation-intensive interpolation -// that I can't seem to make fast in Cython -// - -#include "fixed_interpolator.h" - -#define VINDEX(A,B,C) data[((((A)+ci[0])*(ds[1]+1)+((B)+ci[1]))*(ds[2]+1)+ci[2]+(C))] -// (((C*ds[1])+B)*ds[0]+A) -#define OINDEX(A,B,C) 
data[(A)*(ds[1]+1)*(ds[2]+1)+(B)*ds[2]+(B)+(C)] - -npy_float64 fast_interpolate(int ds[3], int ci[3], npy_float64 dp[3], - npy_float64 *data) -{ - int i; - npy_float64 dv, dm[3]; - for(i=0;i<3;i++)dm[i] = (1.0 - dp[i]); - dv = 0.0; - dv += VINDEX(0,0,0) * (dm[0]*dm[1]*dm[2]); - dv += VINDEX(0,0,1) * (dm[0]*dm[1]*dp[2]); - dv += VINDEX(0,1,0) * (dm[0]*dp[1]*dm[2]); - dv += VINDEX(0,1,1) * (dm[0]*dp[1]*dp[2]); - dv += VINDEX(1,0,0) * (dp[0]*dm[1]*dm[2]); - dv += VINDEX(1,0,1) * (dp[0]*dm[1]*dp[2]); - dv += VINDEX(1,1,0) * (dp[0]*dp[1]*dm[2]); - dv += VINDEX(1,1,1) * (dp[0]*dp[1]*dp[2]); - /*assert(dv < -20);*/ - return dv; -} - -npy_float64 offset_interpolate(int ds[3], npy_float64 dp[3], npy_float64 *data) -{ - int i; - npy_float64 dv, vz[4]; - - dv = 1.0 - dp[2]; - vz[0] = dv*OINDEX(0,0,0) + dp[2]*OINDEX(0,0,1); - vz[1] = dv*OINDEX(0,1,0) + dp[2]*OINDEX(0,1,1); - vz[2] = dv*OINDEX(1,0,0) + dp[2]*OINDEX(1,0,1); - vz[3] = dv*OINDEX(1,1,0) + dp[2]*OINDEX(1,1,1); - - dv = 1.0 - dp[1]; - vz[0] = dv*vz[0] + dp[1]*vz[1]; - vz[1] = dv*vz[2] + dp[1]*vz[3]; - - dv = 1.0 - dp[0]; - vz[0] = dv*vz[0] + dp[0]*vz[1]; - - return vz[0]; -} - -void offset_fill(int ds[3], npy_float64 *data, npy_float64 gridval[8]) -{ - gridval[0] = OINDEX(0,0,0); - gridval[1] = OINDEX(1,0,0); - gridval[2] = OINDEX(1,1,0); - gridval[3] = OINDEX(0,1,0); - gridval[4] = OINDEX(0,0,1); - gridval[5] = OINDEX(1,0,1); - gridval[6] = OINDEX(1,1,1); - gridval[7] = OINDEX(0,1,1); -} - -void vertex_interp(npy_float64 v1, npy_float64 v2, npy_float64 isovalue, - npy_float64 vl[3], npy_float64 dds[3], - npy_float64 x, npy_float64 y, npy_float64 z, - int vind1, int vind2) -{ - /*if (fabs(isovalue - v1) < 0.000001) return 0.0; - if (fabs(isovalue - v2) < 0.000001) return 1.0; - if (fabs(v1 - v2) < 0.000001) return 0.0;*/ - int i; - static npy_float64 cverts[8][3] = - {{0,0,0}, {1,0,0}, {1,1,0}, {0,1,0}, - {0,0,1}, {1,0,1}, {1,1,1}, {0,1,1}}; - - npy_float64 mu = ((isovalue - v1) / (v2 - v1)); - - if (fabs(1.0 - isovalue/v1) < 0.000001) mu = 0.0; - if (fabs(1.0 - isovalue/v2) < 0.000001) mu = 1.0; - if (fabs(v1/v2) < 0.000001) mu = 0.0; - - vl[0] = x; vl[1] = y; vl[2] = z; - for (i=0;i<3;i++) - vl[i] += dds[i] * cverts[vind1][i] - + dds[i] * mu*(cverts[vind2][i] - cverts[vind1][i]); -} - -npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3], - npy_float64 *data) -{ - /* dims is one less than the dimensions of the array */ - int i; - npy_float64 dm[3], vz[4]; - //dp is the distance to the plane. 
dm is val, dp = 1-val - for(i=0;i<3;i++)dm[i] = (1.0 - dp[i]); - - //First interpolate in z - vz[0] = dm[2]*VINDEX(0,0,0) + dp[2]*VINDEX(0,0,1); - vz[1] = dm[2]*VINDEX(0,1,0) + dp[2]*VINDEX(0,1,1); - vz[2] = dm[2]*VINDEX(1,0,0) + dp[2]*VINDEX(1,0,1); - vz[3] = dm[2]*VINDEX(1,1,0) + dp[2]*VINDEX(1,1,1); - - //Then in y - vz[0] = dm[1]*vz[0] + dp[1]*vz[1]; - vz[1] = dm[1]*vz[2] + dp[1]*vz[3]; - - //Then in x - vz[0] = dm[0]*vz[0] + dp[0]*vz[1]; - /*assert(dv < -20);*/ - return vz[0]; -} - -void eval_gradient(int ds[3], npy_float64 dp[3], - npy_float64 *data, npy_float64 *grad) -{ - // We just take some small value - - int i; - npy_float64 denom, plus, minus, backup, normval; - - normval = 0.0; - for (i = 0; i < 3; i++) { - backup = dp[i]; - grad[i] = 0.0; - if (dp[i] >= 0.95) {plus = dp[i]; minus = dp[i] - 0.05;} - else if (dp[i] <= 0.05) {plus = dp[i] + 0.05; minus = 0.0;} - else {plus = dp[i] + 0.05; minus = dp[i] - 0.05;} - //fprintf(stderr, "DIM: %d %0.3lf %0.3lf\n", i, plus, minus); - denom = plus - minus; - dp[i] = plus; - grad[i] += offset_interpolate(ds, dp, data) / denom; - dp[i] = minus; - grad[i] -= offset_interpolate(ds, dp, data) / denom; - dp[i] = backup; - normval += grad[i]*grad[i]; - } - if (normval != 0.0){ - normval = sqrt(normval); - for (i = 0; i < 3; i++) grad[i] /= -normval; - //fprintf(stderr, "Normval: %0.3lf %0.3lf %0.3lf %0.3lf\n", - // normval, grad[0], grad[1], grad[2]); - }else{ - grad[0]=grad[1]=grad[2]=0.0; - } -} - -/* -int edge_table[256]={ -0x0 , 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, -0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, 0xd03, 0xe09, 0xf00, -0x190, 0x99 , 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, -0x99c, 0x895, 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90, -0x230, 0x339, 0x33 , 0x13a, 0x636, 0x73f, 0x435, 0x53c, -0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30, -0x3a0, 0x2a9, 0x1a3, 0xaa , 0x7a6, 0x6af, 0x5a5, 0x4ac, -0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0, -0x460, 0x569, 0x663, 0x76a, 0x66 , 0x16f, 0x265, 0x36c, -0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, 0xa69, 0xb60, -0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff , 0x3f5, 0x2fc, -0xdfc, 0xcf5, 0xfff, 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0, -0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55 , 0x15c, -0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950, -0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, 0x2cf, 0x1c5, 0xcc , -0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0, -0x8c0, 0x9c9, 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc, -0xcc , 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, 0x7c0, -0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c, -0x15c, 0x55 , 0x35f, 0x256, 0x55a, 0x453, 0x759, 0x650, -0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc, -0x2fc, 0x3f5, 0xff , 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0, -0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, 0xd65, 0xc6c, -0x36c, 0x265, 0x16f, 0x66 , 0x76a, 0x663, 0x569, 0x460, -0xca0, 0xda9, 0xea3, 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac, -0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa , 0x1a3, 0x2a9, 0x3a0, -0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c, -0x53c, 0x435, 0x73f, 0x636, 0x13a, 0x33 , 0x339, 0x230, -0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c, -0x69c, 0x795, 0x49f, 0x596, 0x29a, 0x393, 0x99 , 0x190, -0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, 0x80c, -0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0 }; - -int tri_table[256][16] = -{{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 8, 3, 
9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1}, -{3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1}, -{3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1}, -{3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1}, -{9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1}, -{9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1}, -{2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1}, -{8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1}, -{9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1}, -{4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1}, -{3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1}, -{1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1}, -{4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1}, -{4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1}, -{9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1}, -{5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1}, -{2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1}, -{9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1}, -{0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1}, -{2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1}, -{10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1}, -{4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1}, -{5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1}, -{5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1}, -{9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1}, -{0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1}, -{1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1}, -{10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1}, -{8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1}, -{2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1}, -{7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1}, -{9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1}, -{2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1}, -{11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1}, -{9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1}, -{5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1}, -{11, 10, 0, 11, 0, 3, 
10, 5, 0, 8, 0, 7, 5, 7, 0, -1}, -{11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1}, -{1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1}, -{9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1}, -{5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1}, -{2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1}, -{0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1}, -{5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1}, -{6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1}, -{3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1}, -{6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1}, -{5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1}, -{1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1}, -{10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1}, -{6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1}, -{8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1}, -{7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1}, -{3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1}, -{5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1}, -{0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1}, -{9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1}, -{8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1}, -{5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1}, -{0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1}, -{6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1}, -{10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1}, -{10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1}, -{8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1}, -{1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1}, -{3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1}, -{0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1}, -{10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1}, -{3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1}, -{6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1}, -{9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1}, -{8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1}, -{3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1}, -{6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1}, -{0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1}, -{10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1}, -{10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1}, -{2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1}, -{7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1}, -{7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1}, -{2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1}, -{1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1}, -{11, 2, 1, 11, 1, 7, 10, 6, 
1, 6, 7, 1, -1, -1, -1, -1}, -{8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1}, -{0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1}, -{7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1}, -{10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1}, -{2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1}, -{6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1}, -{7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1}, -{2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1}, -{1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1}, -{10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1}, -{10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1}, -{0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1}, -{7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1}, -{6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1}, -{8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1}, -{9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1}, -{6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1}, -{4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1}, -{10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1}, -{8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1}, -{0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1}, -{1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1}, -{8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1}, -{10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1}, -{4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1}, -{10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1}, -{5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1}, -{11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1}, -{9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1}, -{6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1}, -{7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1}, -{3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1}, -{7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1}, -{9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1}, -{3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1}, -{6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1}, -{9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1}, -{1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1}, -{4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1}, -{7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1}, -{6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1}, -{3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1}, -{0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1}, -{6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1}, -{0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1}, -{11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1}, -{6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1}, -{5, 8, 9, 5, 2, 8, 5, 6, 2, 
3, 8, 2, -1, -1, -1, -1}, -{9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1}, -{1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1}, -{1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1}, -{10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1}, -{0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1}, -{5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1}, -{10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1}, -{11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1}, -{9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1}, -{7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1}, -{2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1}, -{8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1}, -{9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1}, -{9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1}, -{1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1}, -{9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1}, -{9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1}, -{5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1}, -{0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1}, -{10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1}, -{2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1}, -{0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1}, -{0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1}, -{9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1}, -{5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1}, -{3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1}, -{5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1}, -{8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1}, -{0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1}, -{9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1}, -{0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1}, -{1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1}, -{3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1}, -{4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1}, -{9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1}, -{11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1}, -{11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1}, -{2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1}, -{9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1}, -{3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1}, -{1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1}, -{4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1}, -{4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1}, -{0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1}, -{3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1}, -{3, 0, 9, 3, 9, 
11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1}, -{0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1}, -{9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1}, -{1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, -{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}}; -*/ diff --git a/yt/utilities/lib/fixed_interpolator.cpp b/yt/utilities/lib/fixed_interpolator.cpp new file mode 100644 index 00000000000..bffef80b621 --- /dev/null +++ b/yt/utilities/lib/fixed_interpolator.cpp @@ -0,0 +1,150 @@ +/******************************************************************************* +*******************************************************************************/ + +// +// A small, tiny, itty bitty module for computation-intensive interpolation +// that I can't seem to make fast in Cython +// + +#include "fixed_interpolator.hpp" + +#define VINDEX(A,B,C) data[((((A)+ci[0])*(ds[1]+1)+((B)+ci[1]))*(ds[2]+1)+ci[2]+(C))] +// (((C*ds[1])+B)*ds[0]+A) +#define OINDEX(A,B,C) data[(A)*(ds[1]+1)*(ds[2]+1)+(B)*ds[2]+(B)+(C)] + +npy_float64 fast_interpolate(int ds[3], int ci[3], npy_float64 dp[3], + npy_float64 *data) +{ + int i; + npy_float64 dv, dm[3]; + for(i=0;i<3;i++)dm[i] = (1.0 - dp[i]); + dv = 0.0; + dv += VINDEX(0,0,0) * (dm[0]*dm[1]*dm[2]); + dv += VINDEX(0,0,1) * (dm[0]*dm[1]*dp[2]); + dv += VINDEX(0,1,0) * (dm[0]*dp[1]*dm[2]); + dv += VINDEX(0,1,1) * (dm[0]*dp[1]*dp[2]); + dv += VINDEX(1,0,0) * (dp[0]*dm[1]*dm[2]); + dv += VINDEX(1,0,1) * (dp[0]*dm[1]*dp[2]); + dv += VINDEX(1,1,0) * (dp[0]*dp[1]*dm[2]); + dv += VINDEX(1,1,1) * (dp[0]*dp[1]*dp[2]); + /*assert(dv < -20);*/ + return dv; +} + +npy_float64 offset_interpolate(int ds[3], npy_float64 dp[3], npy_float64 *data) +{ + int i; + npy_float64 dv, vz[4]; + + dv = 1.0 - dp[2]; + printf("%d %d %d", ds[0], ds[1], ds[2]); + vz[0] = dv*OINDEX(0,0,0) + dp[2]*OINDEX(0,0,1); + vz[1] = dv*OINDEX(0,1,0) + dp[2]*OINDEX(0,1,1); + vz[2] = dv*OINDEX(1,0,0) + dp[2]*OINDEX(1,0,1); + vz[3] = dv*OINDEX(1,1,0) + dp[2]*OINDEX(1,1,1); + + dv = 1.0 - dp[1]; + vz[0] = dv*vz[0] + dp[1]*vz[1]; + vz[1] = dv*vz[2] + dp[1]*vz[3]; + + dv = 1.0 - dp[0]; + vz[0] = dv*vz[0] + dp[0]*vz[1]; + + return vz[0]; +} + +void offset_fill(int ds[3], npy_float64 *data, npy_float64 gridval[8]) +{ + gridval[0] = OINDEX(0,0,0); + gridval[1] = OINDEX(1,0,0); + gridval[2] = OINDEX(1,1,0); + gridval[3] = OINDEX(0,1,0); + gridval[4] = OINDEX(0,0,1); + gridval[5] = OINDEX(1,0,1); + gridval[6] = OINDEX(1,1,1); + gridval[7] = OINDEX(0,1,1); +} + +void vertex_interp(npy_float64 v1, npy_float64 v2, npy_float64 isovalue, + npy_float64 vl[3], npy_float64 dds[3], + npy_float64 x, npy_float64 y, npy_float64 z, + int vind1, int vind2) +{ + /*if (fabs(isovalue - v1) < 0.000001) return 0.0; + if (fabs(isovalue - v2) < 0.000001) return 1.0; + if (fabs(v1 - v2) < 0.000001) return 0.0;*/ + int i; + static npy_float64 cverts[8][3] = + {{0,0,0}, {1,0,0}, {1,1,0}, {0,1,0}, + {0,0,1}, {1,0,1}, {1,1,1}, {0,1,1}}; + + npy_float64 mu = ((isovalue - v1) / (v2 - v1)); + + if (fabs(1.0 - isovalue/v1) < 0.000001) mu = 0.0; + if (fabs(1.0 - isovalue/v2) < 0.000001) mu = 1.0; + if (fabs(v1/v2) < 0.000001) mu = 0.0; + + vl[0] = x; 
vl[1] = y; vl[2] = z; + for (i=0;i<3;i++) + vl[i] += dds[i] * cverts[vind1][i] + + dds[i] * mu*(cverts[vind2][i] - cverts[vind1][i]); +} + +npy_float64 trilinear_interpolate(int ds[3], int ci[3], npy_float64 dp[3], + npy_float64 *data) +{ + /* dims is one less than the dimensions of the array */ + int i; + npy_float64 dm[3], vz[4]; + //dp is the distance to the plane. dm is val, dp = 1-val + for(i=0;i<3;i++)dm[i] = (1.0 - dp[i]); + + //First interpolate in z + vz[0] = dm[2]*VINDEX(0,0,0) + dp[2]*VINDEX(0,0,1); + vz[1] = dm[2]*VINDEX(0,1,0) + dp[2]*VINDEX(0,1,1); + vz[2] = dm[2]*VINDEX(1,0,0) + dp[2]*VINDEX(1,0,1); + vz[3] = dm[2]*VINDEX(1,1,0) + dp[2]*VINDEX(1,1,1); + + //Then in y + vz[0] = dm[1]*vz[0] + dp[1]*vz[1]; + vz[1] = dm[1]*vz[2] + dp[1]*vz[3]; + + //Then in x + vz[0] = dm[0]*vz[0] + dp[0]*vz[1]; + /*assert(dv < -20);*/ + return vz[0]; +} + +void eval_gradient(int ds[3], npy_float64 dp[3], + npy_float64 *data, npy_float64 *grad) +{ + // We just take some small value + + int i; + npy_float64 denom, plus, minus, backup, normval; + + normval = 0.0; + for (i = 0; i < 3; i++) { + backup = dp[i]; + grad[i] = 0.0; + if (dp[i] >= 0.95) {plus = dp[i]; minus = dp[i] - 0.05;} + else if (dp[i] <= 0.05) {plus = dp[i] + 0.05; minus = 0.0;} + else {plus = dp[i] + 0.05; minus = dp[i] - 0.05;} + //fprintf(stderr, "DIM: %d %0.3lf %0.3lf\n", i, plus, minus); + denom = plus - minus; + dp[i] = plus; + grad[i] += offset_interpolate(ds, dp, data) / denom; + dp[i] = minus; + grad[i] -= offset_interpolate(ds, dp, data) / denom; + dp[i] = backup; + normval += grad[i]*grad[i]; + } + if (normval != 0.0){ + normval = sqrt(normval); + for (i = 0; i < 3; i++) grad[i] /= -normval; + //fprintf(stderr, "Normval: %0.3lf %0.3lf %0.3lf %0.3lf\n", + // normval, grad[0], grad[1], grad[2]); + }else{ + grad[0]=grad[1]=grad[2]=0.0; + } +} \ No newline at end of file diff --git a/yt/utilities/lib/fixed_interpolator.h b/yt/utilities/lib/fixed_interpolator.hpp similarity index 100% rename from yt/utilities/lib/fixed_interpolator.h rename to yt/utilities/lib/fixed_interpolator.hpp diff --git a/yt/utilities/lib/fixed_interpolator.pxd b/yt/utilities/lib/fixed_interpolator.pxd index f950367318e..a5d0d19e980 100644 --- a/yt/utilities/lib/fixed_interpolator.pxd +++ b/yt/utilities/lib/fixed_interpolator.pxd @@ -9,7 +9,7 @@ Fixed interpolator includes cimport numpy as np -cdef extern from "fixed_interpolator.h": +cdef extern from "fixed_interpolator.hpp": np.float64_t fast_interpolate(int ds[3], int ci[3], np.float64_t dp[3], np.float64_t *data) nogil np.float64_t offset_interpolate(int ds[3], np.float64_t dp[3], From d3acbe8741d64bf7321fc257b5c3dab46b9bcfcf Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 27 May 2020 14:05:06 +0100 Subject: [PATCH 407/653] Route call depending on backend (KDtree or Octree) --- yt/utilities/lib/image_samplers.pxd | 1 + yt/utilities/lib/image_samplers.pyx | 158 ++++++++++++++---- yt/utilities/lib/pyoctree_raytracing.py | 4 + .../volume_rendering/render_source.py | 61 ++++--- yt/visualization/volume_rendering/utils.py | 5 +- 5 files changed, 172 insertions(+), 57 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pxd b/yt/utilities/lib/image_samplers.pxd index fb404fc4e36..8e3a3faf18d 100644 --- a/yt/utilities/lib/image_samplers.pxd +++ b/yt/utilities/lib/image_samplers.pxd @@ -53,6 +53,7 @@ cdef class ImageSampler: cdef void *supp_data cdef np.float64_t width[3] cdef public object lens_type + cdef public str volume_method cdef calculate_extent_function *extent_function cdef 
generate_vector_info_function *vector_function cdef void setup(self, PartitionedGrid pg) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 978445b0751..81ddd21497d 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -15,41 +15,16 @@ import numpy as np cimport cython cimport lenses -cimport numpy as np -from field_interpolation_tables cimport ( - FieldInterpolationTable, - FIT_eval_transfer, - FIT_eval_transfer_with_light, - FIT_initialize_table, -) -from libc.math cimport ( - M_PI, - acos, - asin, - atan, - atan2, - cos, - exp, - fabs, - floor, - log2, - sin, - sqrt, -) -from libc.stdlib cimport abs, calloc, free, malloc - -from yt.utilities.lib.fp_utils cimport fclip, fmax, fmin, i64clip, iclip, imax, imin - -from .fixed_interpolator cimport ( - eval_gradient, - fast_interpolate, - offset_fill, - offset_interpolate, - trilinear_interpolate, - vertex_interp, -) -from .grid_traversal cimport walk_volume +from .grid_traversal cimport walk_volume, sampler_function +from .fixed_interpolator cimport \ + offset_interpolate, \ + fast_interpolate, \ + trilinear_interpolate, \ + eval_gradient, \ + offset_fill, \ + vertex_interp +from .cyoctree_raytracing cimport CythonOctreeRayTracing, RayInfo cdef extern from "platform_dep.h": long int lrint(double x) nogil @@ -88,6 +63,10 @@ cdef class ImageSampler: *args, **kwargs): cdef int i + self.volume_method = kwargs.pop('volume_method', None) + if self.volume_method and self.volume_method not in ('KDTree', 'Octree'): + raise NotImplementedError( + 'Invalid volume method "%s".' % self.svolume_method) camera_data = kwargs.pop("camera_data", None) if camera_data is not None: self.camera_data = camera_data @@ -146,15 +125,20 @@ cdef class ImageSampler: for i in range(3): self.width[i] = width[i] + def __call__(self, PartitionedGrid pg, **kwa): + if self.volume_method == 'KDTree': + return self.cast_through_kdtree(pg, **kwa) + elif self.volume_method == 'Octree': + return self.cast_through_octree(pg, **kwa) + @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - def __call__(self, PartitionedGrid pg, int num_threads = 0): + def cast_through_kdtree(self, PartitionedGrid pg, int num_threads = 0): # This routine will iterate over all of the vectors and cast each in # turn. 
Might benefit from a more sophisticated intersection check, # like http://courses.csusm.edu/cs697exz/ray_box.htm cdef int vi, vj, hit, i, j - cdef np.int64_t iter[4] cdef VolumeContainer *vc = pg.container self.setup(pg) cdef np.float64_t *v_pos @@ -162,6 +146,7 @@ cdef class ImageSampler: cdef np.float64_t max_t hit = 0 cdef np.int64_t nx, ny, size + cdef np.int64_t iter[4] self.extent_function(self, vc, iter) iter[0] = i64clip(iter[0]-1, 0, self.nv[0]) iter[1] = i64clip(iter[1]+1, 0, self.nv[0]) @@ -202,6 +187,107 @@ cdef class ImageSampler: free(v_dir) return hit + @cython.boundscheck(False) + @cython.wraparound(False) + @cython.cdivision(True) + def cast_through_octree(self, PartitionedGrid pg, CythonOctreeRayTracing oct, int num_threads = 0): + cdef RayInfo[int]** ret + cdef RayInfo[int]* ri + self.setup(pg) + + cdef sampler_function* sampler = self.sample + + cdef np.int64_t nx, ny, size + cdef np.int64_t iter[4] + cdef VolumeContainer *vc # = pg.container + cdef ImageAccumulator *idata + + self.extent_function(self, pg.container, iter) + iter[0] = i64clip(iter[0]-1, 0, self.nv[0]) + iter[1] = i64clip(iter[1]+1, 0, self.nv[0]) + iter[2] = i64clip(iter[2]-1, 0, self.nv[1]) + iter[3] = i64clip(iter[3]+1, 0, self.nv[1]) + nx = (iter[1] - iter[0]) + ny = (iter[3] - iter[2]) + size = nx * ny + + cdef np.float64_t* vp_pos_ptr = &self.vp_pos[0,0,0] + cdef np.float64_t* vp_dir_ptr = &self.vp_dir[0,0,0] + + ret = oct.oct.cast_rays(vp_pos_ptr, vp_dir_ptr, size) + + cdef int* cell_ind + cdef double* tval + + cdef int i, j, k, vi, vj, icell + cdef int[3] index = [0, 0, 0] + cdef int chunksize = 100 + + cdef int n_fields = pg.container.n_fields + + with nogil, parallel(num_threads=num_threads): + idata = malloc(sizeof(ImageAccumulator)) + idata.supp_data = self.supp_data + + vc = malloc(sizeof(VolumeContainer)) + vc.n_fields = 1 + vc.data = malloc(sizeof(np.float64_t*)) + vc.mask = malloc(8*sizeof(np.uint8_t)) + # The actual dimensions are 2x2x2, but the sampler + # assumes vertex-centred data for a 1x1x1 lattice (i.e. 
+ # 2^3 vertices) + vc.dims[0] = 1 + vc.dims[1] = 1 + vc.dims[2] = 1 + for j in prange(size, schedule='static', chunksize=chunksize): + vj = j % ny + vi = (j - vj) / ny + + # Contains the ordered indices of the cells hit by the ray + # and the entry/exit t values + ri = ret[j] + if ri.keys.size() == 0: + continue + + for i in range(Nch): + idata.rgba[i] = 0 + for i in range(8): + vc.mask[i] = 1 + + # Iterate over cells + for i in range(ri.keys.size()): + icell = ri.keys[i] + for k in range(n_fields): + vc.data[k] = &pg.container.data[k][8*icell] + + # Fill the volume container with the current boundaries + for k in range(3): + vc.left_edge[k] = pg.container.left_edge[3*icell+k] + vc.right_edge[k] = pg.container.right_edge[3*icell+k] + vc.dds[k] = (vc.right_edge[k] - vc.left_edge[k]) + vc.idds[k] = 1/vc.dds[k] + # Now call the sampler + sampler( + vc, + &self.vp_pos[vi, vj, 0], + &self.vp_dir[vi, vj, 0], + ri.t[2*i ], + ri.t[2*i+1], + index, + idata + ) + + for i in range(Nch): + idata.rgba[i] = 0 + free(vc.data) + free(vc) + free(idata) + # Free memory + for j in range(size): + free(ret[j]) + free(ret) + pass + cdef void setup(self, PartitionedGrid pg): return diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 1587eac0aac..68b54774ead 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -14,6 +14,10 @@ class OctreeRayTracing(object): log_fields = None fields = None + # Internal data + _cell_index = None + _tvalues = None + def __init__(self, data_source): self.data_source = data_source LE = np.array([0, 0, 0], dtype=np.float64) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 57395fd53b5..90ea00b1863 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -10,6 +10,7 @@ from yt.utilities.amr_kdtree.api import AMRKDTree from yt.utilities.lib.bounding_volume_hierarchy import BVH from yt.utilities.lib.misc_utilities import zlines, zpoints +from yt.utilities.lib.partitioned_grid import PartitionedGrid from yt.utilities.lib.pyoctree_raytracing import OctreeRayTracing from yt.utilities.on_demand_imports import NotAModule from yt.utilities.parallel_tools.parallel_analysis_interface import ( @@ -124,6 +125,7 @@ def create_volume_source(data_source, field): elif issubclass(index_class, OctreeIndex): return OctreeVolumeSource(data_source, field) + class VolumeSource(RenderSource): """A class for rendering data from a volumetric data source @@ -170,6 +172,7 @@ class VolumeSource(RenderSource): _image = None data_source = None + volume_method = None def __init__(self, data_source, field): r"""Initialize a new volumetric source for rendering.""" @@ -471,6 +474,8 @@ def __repr__(self): class KDTreeVolumeSource(VolumeSource): + volume_method = "KDTree" + def _get_volume(self): """The abstract volume associated with this VolumeSource @@ -533,6 +538,8 @@ def render(self, camera, zbuffer=None): class OctreeVolumeSource(VolumeSource): + volume_method = "Octree" + def __init__(self, *args, **kwa): super(OctreeVolumeSource, self).__init__(*args, **kwa) self.set_use_ghost_zones(True) @@ -547,6 +554,18 @@ def _get_volume(self): if self._volume is None: mylog.info("Creating volume") volume = OctreeRayTracing(self.data_source) + + data = self.data_source + ds = data.ds + + xyz = np.stack( + [data[_].to("unitary").value for _ in "x y z".split()], axis=-1 + ) + lvl = 
data["grid_level"].astype(np.int32).value + ds.parameters["levelmin"] + ipos = np.floor(xyz * (1 << (ds.parameters["levelmax"]))).astype(np.int32) + + mylog.debug("Adding cells to volume") + volume.octree.add_nodes(ipos, lvl, np.arange(len(ipos), dtype=np.int32)) self._volume = volume return self._volume @@ -573,37 +592,39 @@ def render(self, camera, zbuffer=None): """ self.zbuffer = zbuffer self.set_sampler(camera) - assert (self.sampler is not None) + assert self.sampler is not None - mylog.debug("Casting rays") - if self.check_nans: - for brick in self.volume.bricks: - for data in brick.my_data: - if np.any(np.isnan(data)): - raise RuntimeError - - sampler_params = camera._get_sampler_params(self) + data = self.data_source + ds = data.ds - vp_pos = sampler_params['vp_pos'].to('unitary').value.reshape(-1, 3) - vp_dir = sampler_params['vp_dir'].value.reshape(-1, 3).copy() + dx = data["dx"].to("unitary").value[:, None] + xyz = np.stack([data[_].to("unitary").value for _ in "x y z".split()], axis=-1) + LE = xyz - dx / 2 + RE = xyz + dx / 2 - self.volume.cast_rays(vp_pos, vp_dir) - mylog.debug("Done casting rays") + mylog.debug("Gathering data") + # TODO: compute a 2x2x2, vertex-centred dataset + dt = np.stack([data[self.field] for _ in range(8)], axis=-1).reshape( + 1, -1, 8, 1 + ) + mask = np.full_like(dt[0, ...], 1, dtype=np.uint8) + dims = np.array([1, 1, 1]) + pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims) - mylog.debug("Sample rays") - self.volume.sample(self.sampler) - mylog.debug("Done sampling") + mylog.debug("Casting rays") + self.sampler(pg, oct=self.volume.octree, num_threads=1) + mylog.debug("Done casting rays") - self.current_image = self.finalize_image( - camera, self.sampler.aimage) + self.current_image = self.finalize_image(camera, self.sampler.aimage) if zbuffer is None: self.zbuffer = ZBuffer( - self.current_image, - np.full(self.current_image.shape[:2], np.inf)) + self.current_image, np.full(self.current_image.shape[:2], np.inf) + ) return self.current_image + class MeshSource(OpaqueSource): """A source for unstructured mesh data. 
diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 4a073a9d3f1..7d6f92bab03 100644 --- a/yt/visualization/volume_rendering/utils.py +++ b/yt/visualization/volume_rendering/utils.py @@ -66,7 +66,10 @@ def new_volume_render_sampler(camera, render_source): params["transfer_function"], params["num_samples"], ) - kwargs = {"lens_type": params["lens_type"]} + kwargs = { + "lens_type": params["lens_type"], + "volume_method": render_source.volume_method, + } if "camera_data" in params: kwargs["camera_data"] = params["camera_data"] if render_source.zbuffer is not None: From f7b1db8a7f62c539a7348d59330cd96885fd3ea4 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 27 May 2020 14:09:37 +0100 Subject: [PATCH 408/653] Only apply gathering from volume when using the KDTree approach --- yt/visualization/volume_rendering/render_source.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 90ea00b1863..e0149cc985c 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -458,8 +458,6 @@ def finalize_image(self, camera, image): image: :class:`yt.data_objects.image_array.ImageArray` instance A reference to an image to fill """ - if self._volume is not None: - image = self.volume.reduce_tree_images(image, camera.lens.viewpoint) image.shape = camera.resolution[0], camera.resolution[1], 4 # If the call is from VR, the image is rotated by 180 to get correct # up direction @@ -536,6 +534,13 @@ def render(self, camera, zbuffer=None): return self.current_image + def finalize_image(self, camera, image): + if self._volume is not None: + image = self.volume.reduce_tree_images( + image, camera.lens.viewpoint) + + return super(KDTreeVolumeSource, self).finalize_image(camera, image) + class OctreeVolumeSource(VolumeSource): volume_method = "Octree" From 3f1b4dcd53705e17dd8666aacaec22fed5de6b1d Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 27 May 2020 15:45:26 +0100 Subject: [PATCH 409/653] Pass the right data around --- yt/utilities/lib/fixed_interpolator.cpp | 1 - yt/utilities/lib/image_samplers.pyx | 26 ++++++++++++++----- yt/utilities/lib/partitioned_grid.pyx | 10 ++++--- .../volume_rendering/render_source.py | 11 ++++---- 4 files changed, 31 insertions(+), 17 deletions(-) diff --git a/yt/utilities/lib/fixed_interpolator.cpp b/yt/utilities/lib/fixed_interpolator.cpp index bffef80b621..9fffdc59c80 100644 --- a/yt/utilities/lib/fixed_interpolator.cpp +++ b/yt/utilities/lib/fixed_interpolator.cpp @@ -37,7 +37,6 @@ npy_float64 offset_interpolate(int ds[3], npy_float64 dp[3], npy_float64 *data) npy_float64 dv, vz[4]; dv = 1.0 - dp[2]; - printf("%d %d %d", ds[0], ds[1], ds[2]); vz[0] = dv*OINDEX(0,0,0) + dp[2]*OINDEX(0,0,1); vz[1] = dv*OINDEX(0,1,0) + dp[2]*OINDEX(0,1,1); vz[2] = dv*OINDEX(1,0,0) + dp[2]*OINDEX(1,0,1); diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 81ddd21497d..aa3d4d268c1 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -14,6 +14,12 @@ Image sampler definitions import numpy as np cimport cython +from libc.stdlib cimport malloc, free +from libc.math cimport sqrt +from yt.utilities.lib.fp_utils cimport imin, fclip, i64clip +from field_interpolation_tables cimport \ + FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\ + 
FIT_eval_transfer_with_light cimport lenses from .grid_traversal cimport walk_volume, sampler_function from .fixed_interpolator cimport \ @@ -225,6 +231,8 @@ cdef class ImageSampler: cdef int n_fields = pg.container.n_fields + cdef np.float64_t vp_dir_len + with nogil, parallel(num_threads=num_threads): idata = malloc(sizeof(ImageAccumulator)) idata.supp_data = self.supp_data @@ -243,6 +251,10 @@ cdef class ImageSampler: vj = j % ny vi = (j - vj) / ny + vp_dir_len = sqrt( + self.vp_dir[vi, vj, 0]**2 + + self.vp_dir[vi, vj, 1]**2 + + self.vp_dir[vi, vj, 2]**2) # Contains the ordered indices of the cells hit by the ray # and the entry/exit t values ri = ret[j] @@ -258,12 +270,12 @@ cdef class ImageSampler: for i in range(ri.keys.size()): icell = ri.keys[i] for k in range(n_fields): - vc.data[k] = &pg.container.data[k][8*icell] + vc.data[k] = &pg.container.data[k][14*icell] # Fill the volume container with the current boundaries for k in range(3): - vc.left_edge[k] = pg.container.left_edge[3*icell+k] - vc.right_edge[k] = pg.container.right_edge[3*icell+k] + vc.left_edge[k] = pg.container.data[0][14*icell+8+k] + vc.right_edge[k] = pg.container.data[0][14*icell+11+k] vc.dds[k] = (vc.right_edge[k] - vc.left_edge[k]) vc.idds[k] = 1/vc.dds[k] # Now call the sampler @@ -271,16 +283,16 @@ cdef class ImageSampler: vc, &self.vp_pos[vi, vj, 0], &self.vp_dir[vi, vj, 0], - ri.t[2*i ], - ri.t[2*i+1], + ri.t[2*i ]*vp_dir_len, + ri.t[2*i+1]*vp_dir_len, index, idata ) - for i in range(Nch): - idata.rgba[i] = 0 + self.image[vi, vj, i] = idata.rgba[i] free(vc.data) free(vc) + idata.supp_data = NULL free(idata) # Free memory for j in range(size): diff --git a/yt/utilities/lib/partitioned_grid.pyx b/yt/utilities/lib/partitioned_grid.pyx index b943b62e0db..0ed89f497db 100644 --- a/yt/utilities/lib/partitioned_grid.pyx +++ b/yt/utilities/lib/partitioned_grid.pyx @@ -28,7 +28,8 @@ cdef class PartitionedGrid: mask, np.ndarray[np.float64_t, ndim=1] left_edge, np.ndarray[np.float64_t, ndim=1] right_edge, - np.ndarray[np.int64_t, ndim=1] dims): + np.ndarray[np.int64_t, ndim=1] dims, + int n_fields = -1): # The data is likely brought in via a slice, so we copy it cdef np.ndarray[np.float64_t, ndim=3] tdata cdef np.ndarray[np.uint8_t, ndim=3] mask_data @@ -39,7 +40,10 @@ cdef class PartitionedGrid: self.container = \ malloc(sizeof(VolumeContainer)) cdef VolumeContainer *c = self.container # convenience - cdef int n_fields = len(data) + if n_fields == -1: + n_fields = len(data) + cdef int n_data = len(data) + c.n_fields = n_fields for i in range(3): c.left_edge[i] = left_edge[i] @@ -51,7 +55,7 @@ cdef class PartitionedGrid: self.source_mask = mask mask_data = mask c.data = malloc(sizeof(np.float64_t*) * n_fields) - for i in range(n_fields): + for i in range(n_data): tdata = data[i] c.data[i] = tdata.data c.mask = mask_data.data diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index e0149cc985c..4246f82d4c4 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -536,8 +536,7 @@ def render(self, camera, zbuffer=None): def finalize_image(self, camera, image): if self._volume is not None: - image = self.volume.reduce_tree_images( - image, camera.lens.viewpoint) + image = self.volume.reduce_tree_images(image, camera.lens.viewpoint) return super(KDTreeVolumeSource, self).finalize_image(camera, image) @@ -609,12 +608,12 @@ def render(self, camera, zbuffer=None): mylog.debug("Gathering data") # 
TODO: compute a 2x2x2, vertex-centred dataset - dt = np.stack([data[self.field] for _ in range(8)], axis=-1).reshape( - 1, -1, 8, 1 - ) + dt = np.stack( + [data[self.field].value for _ in range(8)] + [*LE.T, *RE.T], axis=-1 + ).reshape(1, len(dx), 14, 1) mask = np.full_like(dt[0, ...], 1, dtype=np.uint8) dims = np.array([1, 1, 1]) - pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims) + pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims, n_fields=1) mylog.debug("Casting rays") self.sampler(pg, oct=self.volume.octree, num_threads=1) From 555aba4bcf7c40f1dc9e01012adc4aca3193f0d4 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 27 May 2020 17:51:48 +0100 Subject: [PATCH 410/653] First working version Still missing the vertex-centred data interpolation feature though --- yt/utilities/lib/image_samplers.pyx | 15 ++++------ yt/utilities/lib/pyoctree_raytracing.py | 28 ++++++++----------- .../volume_rendering/render_source.py | 6 ++-- 3 files changed, 19 insertions(+), 30 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index aa3d4d268c1..4791c213591 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -205,16 +205,11 @@ cdef class ImageSampler: cdef np.int64_t nx, ny, size cdef np.int64_t iter[4] - cdef VolumeContainer *vc # = pg.container + cdef VolumeContainer *vc cdef ImageAccumulator *idata - self.extent_function(self, pg.container, iter) - iter[0] = i64clip(iter[0]-1, 0, self.nv[0]) - iter[1] = i64clip(iter[1]+1, 0, self.nv[0]) - iter[2] = i64clip(iter[2]-1, 0, self.nv[1]) - iter[3] = i64clip(iter[3]+1, 0, self.nv[1]) - nx = (iter[1] - iter[0]) - ny = (iter[3] - iter[2]) + nx = self.nv[0] + ny = self.nv[1] size = nx * ny cdef np.float64_t* vp_pos_ptr = &self.vp_pos[0,0,0] @@ -283,8 +278,8 @@ cdef class ImageSampler: vc, &self.vp_pos[vi, vj, 0], &self.vp_dir[vi, vj, 0], - ri.t[2*i ]*vp_dir_len, - ri.t[2*i+1]*vp_dir_len, + ri.t[2*i ]/vp_dir_len, + ri.t[2*i+1]/vp_dir_len, index, idata ) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 68b54774ead..9fd7a834766 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -35,23 +35,17 @@ def __init__(self, data_source): def set_fields(self, fields, log_fields, no_ghost, force=False): if no_ghost: - raise NotImplementedError('Cannot use no ghost with Octree datasets') - new_fields = self.data_source._determine_fields(fields) - regenerate_data = self.fields is None or \ - len(self.fields) != len(new_fields) or \ - self.fields != new_fields or force - if not iterable(log_fields): - log_fields = [log_fields] - new_log_fields = list(log_fields) - self.fields = new_fields - - if self.log_fields is not None and not regenerate_data: - flip_log = list(map(operator.ne, self.log_fields, new_log_fields)) - else: - flip_log = [False] * len(new_log_fields) - self.log_fields = new_log_fields - - # TODO: cache data in the 3x3x3 neighbouring cells + raise NotImplementedError('Ghost zones are required with Octree datasets') + + assert len(fields) == 1 + fields = self.data_source._determine_fields(fields) + + for field, take_log in zip(fields, log_fields): + if take_log: + tmp = np.log10(self.data_source[field]) + else: + tmp = self.data_source[field] + self.data = [tmp]*8 def cast_rays(self, vp_pos, vp_dir): """Cast the rays through the oct. 
diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 4246f82d4c4..c7e59534d9f 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -608,9 +608,9 @@ def render(self, camera, zbuffer=None): mylog.debug("Gathering data") # TODO: compute a 2x2x2, vertex-centred dataset - dt = np.stack( - [data[self.field].value for _ in range(8)] + [*LE.T, *RE.T], axis=-1 - ).reshape(1, len(dx), 14, 1) + dt = np.stack(self.volume.data + [*LE.T, *RE.T], axis=-1).reshape( + 1, len(dx), 14, 1 + ) mask = np.full_like(dt[0, ...], 1, dtype=np.uint8) dims = np.array([1, 1, 1]) pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims, n_fields=1) From 5edcfd266c3c3aa56e38adf6d6c56151d020ffad Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 28 May 2020 11:31:04 +0100 Subject: [PATCH 411/653] Now supports trilinear interpolation! --- yt/data_objects/octree_subset.py | 32 ++++++++++++++-- yt/utilities/lib/pyoctree_raytracing.py | 51 ++++++++++++++++--------- 2 files changed, 61 insertions(+), 22 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 78e1def558f..e2a609f4792 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -1,3 +1,4 @@ +import warnings from contextlib import contextmanager import numpy as np @@ -521,6 +522,34 @@ def select_particles(self, selector, x, y, z): mask = selector.select_points(x, y, z, 0.0) return mask + def get_vertex_centered_data(self, fields): + _old_api = isinstance(fields, (str, tuple)) + if _old_api: + message = ( + 'get_vertex_centered_data() requires list of fields, rather than ' + 'a single field as an argument.' 
+ ) + warnings.warn(message, DeprecationWarning, stacklevel=2) + fields = [fields] + + # Make sure the field list has only unique entries + fields = list(set(fields)) + new_fields = {} + cg = self.retrieve_ghost_zones(1, fields) + for field in fields: + new_fields[field] = cg[field][1: ,1: ,1: ].copy() + np.add(new_fields[field], cg[field][:-1,1: ,1: ], new_fields[field]) + np.add(new_fields[field], cg[field][1: ,:-1,1: ], new_fields[field]) + np.add(new_fields[field], cg[field][1: ,1: ,:-1], new_fields[field]) + np.add(new_fields[field], cg[field][:-1,1: ,:-1], new_fields[field]) + np.add(new_fields[field], cg[field][1: ,:-1,:-1], new_fields[field]) + np.add(new_fields[field], cg[field][:-1,:-1,1: ], new_fields[field]) + np.add(new_fields[field], cg[field][:-1,:-1,:-1], new_fields[field]) + np.multiply(new_fields[field], 0.125, new_fields[field]) + + if _old_api: + return new_fields[fields[0]] + return new_fields class OctreeSubsetBlockSlicePosition: def __init__(self, ind, block_slice): @@ -572,9 +601,6 @@ def dds(self): def clear_data(self): pass - def get_vertex_centered_data(self, *args, **kwargs): - raise NotImplementedError - @contextmanager def _field_parameter_state(self, field_parameters): yield self.block_slice.octree_subset._field_parameter_state(field_parameters) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 9fd7a834766..5f1e9275455 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -1,12 +1,7 @@ -from yt.funcs import iterable - from yt.utilities.lib.cyoctree_raytracing import CythonOctreeRayTracing -from yt.utilities.amr_kdtree.amr_kdtree import _apply_log -import numpy as np - -import operator - +import numpy as np +from itertools import product class OctreeRayTracing(object): octree = None @@ -33,19 +28,42 @@ def __init__(self, data_source): ipos = np.floor(xyz * (1<<(ds.parameters['levelmax']))).astype(int) self.octree.add_nodes(ipos.astype(np.int32), lvl.astype(np.int32), np.arange(len(ipos), dtype=np.int32)) + def vertex_centered_data(self, field): + data_source = self.data_source + chunks = data_source.index._chunk(data_source, "spatial", ngz=1) + + finfo = data_source.ds._get_field_info(*field) + units = finfo.units + rv = data_source.ds.arr(np.zeros((2, 2, 2, data_source.ires.size), dtype="float64"), units) + ind = {(i, j, k): 0 for i, j, k in product(*[range(2)]*3)} + for chunk in chunks: + with data_source._chunked_read(chunk): + gz = data_source._current_chunk.objs[0] + gz.field_parameters = data_source.field_parameters + wogz = gz._base_grid + vertex_data = gz.get_vertex_centered_data([field])[field] + + for i, j, k in product(*[range(2)]*3): + ind[i, j, k] += wogz.select( + data_source.selector, + vertex_data[i:i+2, j:j+2, k:k+2, ...], + rv[i, j, k, :], ind[i, j, k]) + return rv + def set_fields(self, fields, log_fields, no_ghost, force=False): if no_ghost: raise NotImplementedError('Ghost zones are required with Octree datasets') assert len(fields) == 1 - fields = self.data_source._determine_fields(fields) + field = self.data_source._determine_fields(fields)[0] + take_log = log_fields[0] + vertex_data = self.vertex_centered_data(field) - for field, take_log in zip(fields, log_fields): - if take_log: - tmp = np.log10(self.data_source[field]) - else: - tmp = self.data_source[field] - self.data = [tmp]*8 + if take_log: + vertex_data = np.log10(vertex_data) + + # Vertex_data has shape (2, 2, 2, ...) 
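+ # i.e. one value per cell corner. Reshaping to (8, n_cells) puts the
+ # corner (i, j, k) value of every cell at row 4*i + 2*j + k, which is
+ # the corner ordering the fixed interpolator's OINDEX macro uses for a
+ # 1x1x1 vertex-centred brick.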
+ self.data = vertex_data.reshape(8, -1) def cast_rays(self, vp_pos, vp_dir): """Cast the rays through the oct. @@ -69,10 +87,5 @@ def cast_rays(self, vp_pos, vp_dir): self.octree.cast_rays(vp_pos, vp_dir) return self._cell_index, self._tvalues - # def sample(self, sampler): - # # TODO: Apply to sampler to each oct encountered by all rays. - # self.octree.sample(sampler, self._cell_index, self._tvalues) - def traverse(self, viewpoint): raise Exception() - self.octree.cast_rays() \ No newline at end of file From ec7c5f1b7515cb30b084d75e0c7c89f7ddae9153 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 28 May 2020 11:31:17 +0100 Subject: [PATCH 412/653] Add caching to prevent recreation of costly ghost zones information --- yt/frontends/ramses/data_structures.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 4be5ba45766..ee3b9587ed5 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -276,14 +276,13 @@ def _fill_with_ghostzones( selector.count_octs(self.oct_handler, self.domain_id) * self.nz ** ndim ) - ( - levels, - cell_inds, - file_inds, - domains, - ) = self.oct_handler.file_index_octs_with_ghost_zones( - selector, self.domain_id, cell_count - ) + gz_cache = getattr(self, "_ghost_zone_cache", None) + if gz_cache: + levels, cell_inds, file_inds, domains = gz_cache + else: + gz_cache = levels, cell_inds, file_inds, domains = self.oct_handler.file_index_octs_with_ghost_zones( + selector, self.domain_id, cell_count) + self._ghost_zone_cache = gz_cache # Initializing data container for field in fields: @@ -349,9 +348,14 @@ def fill(self, fd, fields, selector, file_handler): ) def retrieve_ghost_zones(self, ngz, fields, smoothed=False): - new_subset = RAMSESDomainSubset( - self.base_region, self.domain, self.ds, num_ghost_zones=ngz, base_grid=self - ) + new_subset = getattr(self, '_subset_with_gz', None) + if not new_subset: + new_subset = RAMSESDomainSubset( + self.base_region, self.domain, self.ds, num_ghost_zones=ngz, base_grid=self + ) + else: + mylog.debug('Reusing previous subset with ghost zone for domain %s' % self.domain_id) + self._subset_with_gz = new_subset return new_subset From 7cc83e5b8f777dabd99262001c9a472d2a3a143a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 28 May 2020 11:47:04 +0100 Subject: [PATCH 413/653] Remove TODO --- yt/utilities/lib/cyoctree_raytracing.pyx | 5 ++--- yt/visualization/volume_rendering/render_source.py | 8 ++++---- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx index c8b4f4fe308..ca2938690ea 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -49,8 +49,6 @@ cdef class CythonOctreeRayTracing: if Nrays == 0: return - # print('Casting rays') - ret = self.oct.cast_rays(&o[0,0], &d[0,0], Nrays) cdef int* cell_ind @@ -63,8 +61,9 @@ cdef class CythonOctreeRayTracing: cdef double* t_ptr cdef int[3] index = [0, 0, 0] + # Only support square regions for the moment! 
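+ # i.e. the ray bundle is treated as an nx-by-ny grid with
+ # nx == ny == sqrt(Nrays), so Nrays must be a perfect square.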
nx = np.round(Nrays**0.5) - ny = nx # TODO: change this + ny = nx cdef int n_fields = pg.container.n_fields diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index c7e59534d9f..ca9cea35503 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -607,10 +607,10 @@ def render(self, camera, zbuffer=None): RE = xyz + dx / 2 mylog.debug("Gathering data") - # TODO: compute a 2x2x2, vertex-centred dataset - dt = np.stack(self.volume.data + [*LE.T, *RE.T], axis=-1).reshape( - 1, len(dx), 14, 1 - ) + dt = np.stack( + [_ for _ in self.volume.data] + + [*LE.T, *RE.T], + axis=-1).reshape(1, len(dx), 14, 1) mask = np.full_like(dt[0, ...], 1, dtype=np.uint8) dims = np.array([1, 1, 1]) pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims, n_fields=1) From 62fa6a1e4f83f2d65c6a2fb151bce9b59820277e Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 28 May 2020 11:56:28 +0100 Subject: [PATCH 414/653] Flaking --- yt/utilities/lib/pyoctree_raytracing.py | 3 --- yt/visualization/volume_rendering/render_source.py | 1 - 2 files changed, 4 deletions(-) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 5f1e9275455..f5fd46056b8 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -86,6 +86,3 @@ def cast_rays(self, vp_pos, vp_dir): self._cell_index, self._tvalues = \ self.octree.cast_rays(vp_pos, vp_dir) return self._cell_index, self._tvalues - - def traverse(self, viewpoint): - raise Exception() diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index ca9cea35503..496348f97b0 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -599,7 +599,6 @@ def render(self, camera, zbuffer=None): assert self.sampler is not None data = self.data_source - ds = data.ds dx = data["dx"].to("unitary").value[:, None] xyz = np.stack([data[_].to("unitary").value for _ in "x y z".split()], axis=-1) From 865056c8dff28a9f759c087235837154305ecfd3 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 28 May 2020 13:12:45 +0100 Subject: [PATCH 415/653] More flaking --- yt/visualization/volume_rendering/interactive_loop.py | 2 +- yt/visualization/volume_rendering/off_axis_projection.py | 2 +- yt/visualization/volume_rendering/render_source.py | 9 ++++----- yt/visualization/volume_rendering/shader_objects.py | 2 +- .../volume_rendering/tests/test_composite.py | 2 +- yt/visualization/volume_rendering/tests/test_lenses.py | 2 +- yt/visualization/volume_rendering/tests/test_points.py | 6 +++++- yt/visualization/volume_rendering/tests/test_scene.py | 2 +- yt/visualization/volume_rendering/tests/test_varia.py | 2 +- .../volume_rendering/tests/test_vr_orientation.py | 2 +- yt/visualization/volume_rendering/tests/test_zbuff.py | 2 +- 11 files changed, 18 insertions(+), 15 deletions(-) diff --git a/yt/visualization/volume_rendering/interactive_loop.py b/yt/visualization/volume_rendering/interactive_loop.py index 37c6e7040e3..48f92a70552 100644 --- a/yt/visualization/volume_rendering/interactive_loop.py +++ b/yt/visualization/volume_rendering/interactive_loop.py @@ -3,7 +3,7 @@ import cyglfw3 as glfw import numpy as np -from OpenGL import GL as GL +import OpenGL.GL as GL from yt import write_bitmap diff --git 
a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 6f43bda01c0..521aa83aee5 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -9,7 +9,7 @@ off_axis_projection_SPH, ) -from .render_source import VolumeSource +from .render_source import create_volume_source from .scene import Scene from .transfer_functions import ProjectionTransferFunction from .utils import data_source_or_all diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 496348f97b0..d4463b57714 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -606,16 +606,15 @@ def render(self, camera, zbuffer=None): RE = xyz + dx / 2 mylog.debug("Gathering data") - dt = np.stack( - [_ for _ in self.volume.data] + - [*LE.T, *RE.T], - axis=-1).reshape(1, len(dx), 14, 1) + dt = np.stack([_ for _ in self.volume.data] + [*LE.T, *RE.T], axis=-1).reshape( + 1, len(dx), 14, 1 + ) mask = np.full_like(dt[0, ...], 1, dtype=np.uint8) dims = np.array([1, 1, 1]) pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims, n_fields=1) mylog.debug("Casting rays") - self.sampler(pg, oct=self.volume.octree, num_threads=1) + self.sampler(pg, oct=self.volume.octree) mylog.debug("Done casting rays") self.current_image = self.finalize_image(camera, self.sampler.aimage) diff --git a/yt/visualization/volume_rendering/shader_objects.py b/yt/visualization/volume_rendering/shader_objects.py index 72c9a86638c..0f35a48f8d2 100644 --- a/yt/visualization/volume_rendering/shader_objects.py +++ b/yt/visualization/volume_rendering/shader_objects.py @@ -3,7 +3,7 @@ import os from collections import OrderedDict -from OpenGL import GL as GL +import OpenGL.GL as GL from yt.units.yt_array import YTQuantity from yt.utilities.exceptions import ( diff --git a/yt/visualization/volume_rendering/tests/test_composite.py b/yt/visualization/volume_rendering/tests/test_composite.py index baa3cc58d2c..57c2fbe272e 100644 --- a/yt/visualization/volume_rendering/tests/test_composite.py +++ b/yt/visualization/volume_rendering/tests/test_composite.py @@ -11,7 +11,7 @@ BoxSource, LineSource, Scene, - VolumeSource, + create_volume_source, ) diff --git a/yt/visualization/volume_rendering/tests/test_lenses.py b/yt/visualization/volume_rendering/tests/test_lenses.py index a72dc6d2d4b..ac8ab0c57a7 100644 --- a/yt/visualization/volume_rendering/tests/test_lenses.py +++ b/yt/visualization/volume_rendering/tests/test_lenses.py @@ -6,7 +6,7 @@ import numpy as np from yt.testing import fake_random_ds -from yt.visualization.volume_rendering.api import Scene, VolumeSource +from yt.visualization.volume_rendering.api import Scene, create_volume_source def setup(): diff --git a/yt/visualization/volume_rendering/tests/test_points.py b/yt/visualization/volume_rendering/tests/test_points.py index 4901be7ab79..9d2e944590f 100644 --- a/yt/visualization/volume_rendering/tests/test_points.py +++ b/yt/visualization/volume_rendering/tests/test_points.py @@ -6,7 +6,11 @@ import numpy as np from yt.testing import fake_random_ds -from yt.visualization.volume_rendering.api import PointSource, Scene, VolumeSource +from yt.visualization.volume_rendering.api import ( + PointSource, + Scene, + create_volume_source, +) def setup(): diff --git a/yt/visualization/volume_rendering/tests/test_scene.py 
b/yt/visualization/volume_rendering/tests/test_scene.py index c2754b45299..2f2177735ed 100644 --- a/yt/visualization/volume_rendering/tests/test_scene.py +++ b/yt/visualization/volume_rendering/tests/test_scene.py @@ -7,8 +7,8 @@ from yt.testing import assert_fname, fake_random_ds, fake_vr_orientation_test_ds from yt.visualization.volume_rendering.api import ( - VolumeSource, create_scene, + create_volume_source, volume_render, ) diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index ca4e33c11c6..9580e2cc0f5 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -7,7 +7,7 @@ import yt from yt.testing import fake_random_ds -from yt.visualization.volume_rendering.render_source import VolumeSource +from yt.visualization.volume_rendering.render_source import create_volume_source from yt.visualization.volume_rendering.scene import Scene diff --git a/yt/visualization/volume_rendering/tests/test_vr_orientation.py b/yt/visualization/volume_rendering/tests/test_vr_orientation.py index 93a2319b908..4f830db4c46 100644 --- a/yt/visualization/volume_rendering/tests/test_vr_orientation.py +++ b/yt/visualization/volume_rendering/tests/test_vr_orientation.py @@ -9,7 +9,7 @@ from yt.visualization.volume_rendering.api import ( ColorTransferFunction, Scene, - VolumeSource, + create_volume_source, off_axis_projection, ) diff --git a/yt/visualization/volume_rendering/tests/test_zbuff.py b/yt/visualization/volume_rendering/tests/test_zbuff.py index 496a99000eb..f5189fd807d 100644 --- a/yt/visualization/volume_rendering/tests/test_zbuff.py +++ b/yt/visualization/volume_rendering/tests/test_zbuff.py @@ -9,8 +9,8 @@ from yt.visualization.volume_rendering.api import ( OpaqueSource, Scene, - VolumeSource, ZBuffer, + create_volume_source, ) From 4aeca0f2c69aaadba1ed8d4f94be9fab46bcbaa9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 28 May 2020 13:53:16 +0100 Subject: [PATCH 416/653] Removing debug stmts --- yt/utilities/lib/octree_raytracing.cpp | 64 ++------------------------ 1 file changed, 3 insertions(+), 61 deletions(-) diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/octree_raytracing.cpp index 743f3c63623..5d7a44d64a9 100644 --- a/yt/utilities/lib/octree_raytracing.cpp +++ b/yt/utilities/lib/octree_raytracing.cpp @@ -12,14 +12,6 @@ typedef double F; -void print_with_prefix(std::string s, int count, char c='\t') { - for (auto i = 0; i < count; ++i) std::cout << c; - std::cout << s << std::endl; -} - -const bool debug = false; -const bool debug_ray = false; - /* A simple node struct that contains a key and a fixed number of children, typically Nchildren = 2**Ndim */ @@ -183,21 +175,14 @@ class Octree { Node* node = root; Node* child = nullptr; - if (debug )std::cerr << "Creating tree at position " << ipos[0] << " " << ipos[1] << " " << ipos[2] << ", level=" << lvl << "/" << maxDepth << std::endl; - // Go down the tree for (auto ibit = maxDepth-1; ibit >= maxDepth - lvl; --ibit) { - if (debug) std::cerr << "\tbit=" << ibit << std::endl; // Find children based on bits - if (debug) std::cerr << "\t\tbitmask: "; for (auto idim = 0; idim < Ndim; ++idim) { bitMask[idim] = ijk[idim] & mask; - if (debug) std::cerr << bitMask[idim]; } - if (debug) std::cerr << std::endl; mask >>= 1; auto iflat = ijk2iflat(bitMask); - if (debug) std::cerr << "\t→ node[" << node->index << "].children[" << (int)iflat << "]" << std::endl; // Create child if it 
does not exist yet child = create_get_node(node, iflat); @@ -219,15 +204,10 @@ class Octree { void insert_node_no_ret(const int* ipos, const int lvl, keyType key) { Node* n = insert_node(ipos, lvl, key); - if (debug) { - std::cout << "Inserted node at position " << ipos[0] << " " << ipos[1] << " " << ipos[2]; - std::cout << " with key " << n->key << " and index " << n->index << std::endl; - } } // Perform multiple ray cast RayInfo** cast_rays(const F *origins, const F *directions, const int Nrays) { - // std::vector> *ray_infos = mallocnew std::vector>(Nrays); RayInfo **ray_infos = (RayInfo**)malloc(sizeof(RayInfo*)*Nrays); int Nfound = 0; #pragma omp parallel for @@ -237,11 +217,6 @@ class Octree { auto ri = ray_infos[i]; Ray r(&origins[3*i], &directions[3*i], -1e99, 1e99); cast_ray(&r, ri->keys, ri->t); - if (debug || debug_ray) { - std::cout << "Length of kv: " << ri->keys.size() << std::endl; - for (auto v: ri->keys) std::cout << v << " "; - std::cout << std::endl; - } Nfound = std::max(Nfound, (int) ri->keys.size()); } return ray_infos; @@ -249,11 +224,6 @@ class Octree { // Perform single ray tracing void cast_ray(Ray *r, keyVector &keyList, std::vector &tList) { - if (debug || debug_ray) { - std::cout << "Entering cast_ray | o=" << - r->o[0] << " " << r->o[1] << " " << r->o[2] << " | d = " << - r->d[0] << " " << r->d[1] << " " << r->d[2] << std::endl; - } // Boolean mask for direction unsigned char a = 0; unsigned char bmask = twotondim >> 1; @@ -280,7 +250,6 @@ class Octree { proc_subtree(t0[0], t0[1], t0[2], t1[0], t1[1], t1[2], root, a, keyList, tList); - if (debug || debug_ray) std::cout << "Leaving cast_ray" << std::endl; } private: @@ -294,13 +263,11 @@ class Octree { Node* create_get_node(Node* parent, int iflat) { // Create children if not already existing if (parent->children == nullptr) { - if (debug) std::cerr << "Allocating children for node " << parent->index << std::endl; parent->children = (Node**) malloc(sizeof(Node*)*twotondim); for (auto i = 0; i < twotondim; ++i) parent->children[i] = nullptr; } if (parent->children[iflat] == nullptr) { - if (debug) std::cerr << "Creating node[" << parent->index << "].children[" << iflat << "]" << std::endl; Node* node = new Node(); node->level = parent->level + 1; node->index = global_index; @@ -336,22 +303,14 @@ class Octree { const F tx1, const F ty1, const F tz1, const Node *n, const unsigned char a, keyVector &keyList, std::vector &tList, int lvl=0) { - if ((debug || debug_ray) && n) print_with_prefix("Entering proc_subtree in node " + std::to_string(n->index), lvl); // Check if exit face is not in our back - if (tx1 < 0 || ty1 < 0 || tz1 < 0) { - if ((debug || debug_ray)) print_with_prefix("Leaving because tx1|ty1|tz1<0", lvl); - return; - } + if (tx1 < 0 || ty1 < 0 || tz1 < 0) return; // Exit if the node is null (happens if it hasn't been added to the tree) - if (!n) { - if ((debug || debug_ray)) print_with_prefix("Leaving because node is null", lvl); - return; - } + if (!n) return; // Process leaf node if (n->terminal) { - if (debug || debug_ray) print_with_prefix("Inserting node in keyList (index=" + std::to_string(n->key) + ")", lvl); keyList.push_back(n->key); // Push entry & exit t tList.push_back(std::max(std::max(tx0, ty0), tz0)); @@ -361,10 +320,7 @@ class Octree { } // Early break for leafs without children - if (n->children == nullptr) { - if ((debug || debug_ray)) print_with_prefix("Leaving because no children", lvl); - return; - } + if (n->children == nullptr) return; // Compute middle intersection F txm, tym, 
tzm; @@ -373,7 +329,6 @@ class Octree { tzm = (tz0 + tz1) * 0.5; unsigned char iNode = first_node(tx0, ty0, tz0, txm, tym, tzm); - if (debug || debug_ray) print_with_prefix("First node: " + std::to_string(int(iNode)), lvl); // Iterate over children do { @@ -382,51 +337,38 @@ class Octree { { case 0: proc_subtree(tx0, ty0, tz0, txm, tym, tzm, n->children[a], a, keyList, tList, lvl+1); - if (debug || debug_ray) { - // do something - } iNode = next_node(txm, tym, tzm, 4, 2, 1); - if (debug || debug_ray) print_with_prefix("From 0 to " + std::to_string(iNode), lvl); break; case 1: proc_subtree(tx0, ty0, tzm, txm, tym, tz1, n->children[1^a], a, keyList, tList, lvl+1); iNode = next_node(txm, tym, tz1, 5, 3, 8); - if (debug || debug_ray) print_with_prefix("From 1 to " + std::to_string(iNode), lvl); break; case 2: proc_subtree(tx0, tym, tz0, txm, ty1, tzm, n->children[2^a], a, keyList, tList, lvl+1); iNode = next_node(txm, ty1, tzm, 6, 8, 3); - if (debug || debug_ray) print_with_prefix("From 2 to " + std::to_string(iNode), lvl); break; case 3: proc_subtree(tx0, tym, tzm, txm, ty1, tz1, n->children[3^a], a, keyList, tList, lvl+1); iNode = next_node(txm, ty1, tz1, 7, 8, 8); - if (debug || debug_ray) print_with_prefix("From 3 to " + std::to_string(iNode), lvl); break; case 4: proc_subtree(txm, ty0, tz0, tx1, tym, tzm, n->children[4^a], a, keyList, tList, lvl+1); iNode = next_node(tx1, tym, tzm, 8, 6, 5); - if (debug || debug_ray) print_with_prefix("From 4 to " + std::to_string(iNode), lvl); break; case 5: proc_subtree(txm, ty0, tzm, tx1, tym, tz1, n->children[5^a], a, keyList, tList, lvl+1); iNode = next_node(tx1, tym, tz1, 8, 7, 8); - if (debug || debug_ray) print_with_prefix("From 5 to " + std::to_string(iNode), lvl); break; case 6: proc_subtree(txm, tym, tz0, tx1, ty1, tzm, n->children[6^a], a, keyList, tList, lvl+1); iNode = next_node(tx1, ty1, tzm, 8, 8, 7); - if (debug || debug_ray) print_with_prefix("From 6 to " + std::to_string(iNode), lvl); break; case 7: proc_subtree(txm, tym, tzm, tx1, ty1, tz1, n->children[7^a], a, keyList, tList, lvl+1); iNode = 8; - if (debug || debug_ray) print_with_prefix("From 7 to " + std::to_string(iNode), lvl); break; } } while (iNode < twotondim); - - if (debug || debug_ray) print_with_prefix("Leaving proc_subtree", lvl); } // From "An Efficient Parametric Algorithm for Octree Traversal" by Revelles, Urena, & Lastra From ad726cf41e3e684473f9c002bd9ed1b2a97f8ba8 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 28 May 2020 15:22:30 +0100 Subject: [PATCH 417/653] Use math.h instead of cmath in an attempt to solve compilation issues --- yt/utilities/lib/octree_raytracing.cpp | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/octree_raytracing.cpp index 5d7a44d64a9..5568a52ca5c 100644 --- a/yt/utilities/lib/octree_raytracing.cpp +++ b/yt/utilities/lib/octree_raytracing.cpp @@ -4,7 +4,7 @@ #include #include #include -#include +#include #include #include #include @@ -57,7 +57,7 @@ struct Ray { for (auto idim = 0; idim < Ndim; ++idim) { dd += _d[idim] * _d[idim]; } - dd = std::sqrt(dd); + dd = sqrt(dd); for (auto idim = 0; idim < Ndim; ++idim) { d[idim] = _d[idim] / dd; } @@ -71,7 +71,7 @@ struct Ray { for (auto idim = 0; idim < Ndim; ++idim) { dd += _d[idim] * _d[idim]; } - dd = std::sqrt(dd); + dd = sqrt(dd); for (auto idim = 0; idim < Ndim; ++idim) { d[idim] = _d[idim] / dd; } @@ -465,7 +465,7 @@ void test3(){ rr[1] = ry; rr[2] = rz; Ray<3> r(oo, rr, -1e99, 1e99); - std::cerr<< 
"Casting ray in direction:\t" << rx << ", " << ry << ", " << rz << "(len=" << std::sqrt(r.d[0]*r.d[0] + r.d[1]*r.d[1] + r.d[2]*r.d[2]) << ")" <> ret; From edf8d137ea64a1fcb70c239a18755859436b4a29 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 29 May 2020 14:58:10 +0100 Subject: [PATCH 418/653] Correct cell ordering between Fortran/C code --- yt/utilities/lib/pyoctree_raytracing.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index f5fd46056b8..b20d48dac9f 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -63,7 +63,9 @@ def set_fields(self, fields, log_fields, no_ghost, force=False): vertex_data = np.log10(vertex_data) # Vertex_data has shape (2, 2, 2, ...) - self.data = vertex_data.reshape(8, -1) + # Note: here we have the wrong ordering within the oct (classical Fortran/C + # ordering issue) so we need to swap axis 0 and 2. + self.data = vertex_data.swapaxes(0, 2).reshape(8, -1) def cast_rays(self, vp_pos, vp_dir): """Cast the rays through the oct. From 9aa55172f8bad09304fa0c5468b907419d6b1fc5 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 29 May 2020 16:37:17 +0100 Subject: [PATCH 419/653] Remove useless code + give some hints to the compiler --- yt/utilities/lib/cyoctree_raytracing.pxd | 1 - yt/utilities/lib/cyoctree_raytracing.pyx | 89 ----------- yt/utilities/lib/octree_raytracing.cpp | 180 ++--------------------- 3 files changed, 9 insertions(+), 261 deletions(-) diff --git a/yt/utilities/lib/cyoctree_raytracing.pxd b/yt/utilities/lib/cyoctree_raytracing.pxd index 410aa3ba35e..c45b1b27169 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pxd +++ b/yt/utilities/lib/cyoctree_raytracing.pxd @@ -21,7 +21,6 @@ cdef extern from "octree_raytracing.cpp": vector[double] t cdef cppclass Octree3D[T]: - Octree3D(int depth, double* size) Octree3D(int depth, double* LE, double* RE) void insert_node_no_ret(const int* ipos, const int lvl, T key) RayInfo[T]** cast_rays(const double* origins, const double* directions, const int Nrays) diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx index ca2938690ea..c38e2d55809 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -37,94 +37,5 @@ cdef class CythonOctreeRayTracing: ii[2] = ipos_view[i, 2] self.oct.insert_node_no_ret(ii, lvl_view[i], key[i]) - @cython.boundscheck(False) - @cython.wraparound(False) - def cast_rays(self, double[:, ::1] o, double[:, ::1] d, ImageSampler sampler, PartitionedGrid pg, int num_threads = 0): - cdef RayInfo[int]** ret - cdef int Nrays = len(o) - cdef RayInfo[int]* ri - - cdef sampler_function* sample = sampler.sample - - if Nrays == 0: - return - - ret = self.oct.cast_rays(&o[0,0], &d[0,0], Nrays) - - cdef int* cell_ind - cdef double* tval - - cdef int i, j, k, vi, vj, nx, ny, icell - cdef VolumeContainer *vc - cdef ImageAccumulator *idata - cdef int* key_ptr - cdef double* t_ptr - cdef int[3] index = [0, 0, 0] - - # Only support square regions for the moment! 
- nx = np.round(Nrays**0.5) - ny = nx - - cdef int n_fields = pg.container.n_fields - - with nogil, parallel(num_threads=num_threads): - idata = malloc(sizeof(ImageAccumulator)) - vc = malloc(sizeof(VolumeContainer)) - vc.n_fields = 1 - vc.data = malloc(sizeof(np.float64_t*)) - vc.mask = malloc(8*sizeof(np.uint8_t)) - # The actual dimensions are 2x2x2, but the sampler - # assumes vertex-centred data for a 1x1x1 lattice (i.e. - # 2^3 vertices) - vc.dims[0] = 1 - vc.dims[1] = 1 - vc.dims[2] = 1 - for j in prange(Nrays, schedule='static'): - vj = j % ny - vi = (j - vj) / ny - ri = ret[j] - if ri.keys.size() == 0: - continue - - for i in range(Nch): - idata.rgba[i] = 0 - for i in range(8): - vc.mask[i] = 1 - key_ptr = &ri.keys[0] - t_ptr = &ri.t[0] - - # Iterate over cells - for i in range(ri.keys.size()): - icell = key_ptr[i] - for k in range(n_fields): - vc.data[k] = &pg.container.data[k][8*icell] - - # Fill the volume container - for k in range(3): - vc.left_edge[k] = pg.container.left_edge[3*icell+k] - vc.right_edge[k] = pg.container.right_edge[3*icell+k] - vc.dds[i] = (vc.right_edge[i] - vc.left_edge[i]) - vc.idds[i] = 1/vc.dds[i] - # Now call the sampler on the list of cells - sample( - vc, - &o[j, 0], - &d[j, 0], - t_ptr[2*i ], - t_ptr[2*i+1], - index, - idata - ) - - for i in range(Nch): - idata.rgba[i] = 0 - free(vc.data) - free(vc) - free(idata) - # Free memory - for i in range(Nrays): - free(ret[i]) - free(ret) - def __dealloc__(self): del self.oct \ No newline at end of file diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/octree_raytracing.cpp index 5568a52ca5c..8b7b747853f 100644 --- a/yt/utilities/lib/octree_raytracing.cpp +++ b/yt/utilities/lib/octree_raytracing.cpp @@ -125,20 +125,6 @@ class Octree { int global_index = 0; public: - Octree(int _maxDepth, F* _size) : - twotondim (1<children = (Node**) malloc(sizeof(Node*)*twotondim); - for (auto i = 0; i < twotondim; ++i) root->children = nullptr; - - DLE.fill(0); - DRE = size; - } - Octree(int _maxDepth, F* _DLE, F* _DRE) : twotondim (1<** cast_rays(const F *origins, const F *directions, const int Nrays) { - RayInfo **ray_infos = (RayInfo**)malloc(sizeof(RayInfo*)*Nrays); + RayInfo **ray_infos = (RayInfo**) malloc(sizeof(RayInfo*)*Nrays); int Nfound = 0; - #pragma omp parallel for + #pragma omp parallel for shared(ray_infos, Nfound) schedule(static, 100) for (auto i = 0; i < Nrays; ++i) { std::vector tList; ray_infos[i] = new RayInfo(Nfound); auto ri = ray_infos[i]; Ray r(&origins[3*i], &directions[3*i], -1e99, 1e99); cast_ray(&r, ri->keys, ri->t); + + // Keep track of the number of cells hit to preallocate the next ray info container Nfound = std::max(Nfound, (int) ri->keys.size()); } return ray_infos; @@ -260,7 +248,7 @@ class Octree { This will create a new node as a child of the current one, or return an existing one if it already exists */ - Node* create_get_node(Node* parent, int iflat) { + Node* create_get_node(Node* parent, const int iflat) { // Create children if not already existing if (parent->children == nullptr) { parent->children = (Node**) malloc(sizeof(Node*)*twotondim); @@ -319,7 +307,7 @@ class Octree { return; } - // Early break for leafs without children + // Early break for leafs without children that aren't terminal if (n->children == nullptr) return; // Compute middle intersection @@ -381,7 +369,7 @@ class Octree { } else if (ty0 >= std::max(tx0, tz0)) { // enters XZ plane if (txm < ty0) index |= 0b100; if (tzm < ty0) index |= 0b001; - } else { // enters XY plane + } else { // enters 
XY plane if (txm < tz0) index |= 0b100; if (tym < tz0) index |= 0b010; } @@ -402,158 +390,8 @@ class Octree { // Define some instances for easy use in Python -typedef Ray<3> Ray3D; -typedef RayInfo Ray3DInt; -typedef RayInfo Ray3DLong; template using Octree3D = Octree; // Instantiate stuff -template class Octree; - -void test1() { - - // std::array bitMask = {true, false, true}; - - for (unsigned char i = 0; i < 8; i++){ - // auto tmp = iflat2ijk<3>(i); - // std::cout << (int)i << " -> " << tmp[0] << tmp[1] << tmp[2] << " -> " << (int)ijk2iflat<3>(iflat2ijk<3>(i)) << std::endl; - assert(ijk2iflat<3>(iflat2ijk<3>(i)) == i); - } - -} - -void test2() { - // Shouldnt crash - int index = 0; - int N = 4; - F size[3] = {1, 1, 1}; - Octree, 3> o(N, size); - for (auto i = 0; i < 1<({i, j, k})); - ++index; - } - } - } -} - -void test3(){ - int N = 4; - F size[3] = {1, 1, 1}; - Octree, 3> o(N, size); - F ox, oy, oz; - F rx, ry, rz; - // std::cin >> ox >> oy >> oz; - // std::cin >> rx >> ry >> rz; - - ox = 0.01; - oy = 0.84; - oz = 0.95; - - rx = 1.; - ry = -1.2; - rz = -1.5; - - F oo[3]; - oo[0] = ox; - oo[1] = oy; - oo[2] = oz; - F rr[3]; - rr[0] = rx; - rr[1] = ry; - rr[2] = rz; - Ray<3> r(oo, rr, -1e99, 1e99); - std::cerr<< "Casting ray in direction:\t" << rx << ", " << ry << ", " << rz << "(len=" << sqrt(r.d[0]*r.d[0] + r.d[1]*r.d[1] + r.d[2]*r.d[2]) << ")" <> ret; - std::vector tList; - // o.cast_ray(&r, ret, tList); -} - - -void test4() { - int N = 6; - F size[3] = {1, 1, 1}; - Octree, 3> o(N, size); - - // Filling half of octree at level 3 - for (auto i = 0; i < 1<> pos(1024*1024); - std::vector> dir(1024*1024); - - std::mt19937 gen(16091992); - auto dis = std::uniform_real_distribution<> (0., 1.); - for (auto i = 0; i < (int) pos.size(); ++i) { - for (auto idim = 0; idim < 3; ++idim) { - pos[i][idim] = dis(gen); - dir[i][idim] = dis(gen)*2-1; - } - } - // auto ret = o.cast_rays(pos, dir, (int) pos.size()); - // for (auto k: ret) { - // std::cout << k[0] << " " << k[1] << " " << k[2] << std::endl; - // } -} - -void test5() { - std::ifstream inFile; - - inFile.open("/tmp/ipos.txt"); - int ix, iy, iz; - std::vector> ipos; - while (inFile >> ix >> iy >> iz) { - ipos.push_back({ix, iy, iz}); - } - - inFile.close(); - - inFile.open("/tmp/lvl.txt"); - std::vector ilvl; - while (inFile >> ix) { - ilvl.push_back(ix); - } - - // Create octree - double size[3] = {1, 1, 1}; - Octree3D oct(16, size); - for (auto i = 0; i < (int) ipos.size(); ++i) { - oct.insert_node(&ipos[i][0], ilvl[i], i); - } - - // Now cast a ray - double o[3] = {0.5, 0.5, 0.5}; - double d[3] = {1., 2., 3.}; - Ray3D r(o, d, -1e99, 1e99); - std::vector keyList; - std::vector tList; - oct.cast_ray(&r, keyList, tList); -} - -int main() { - // std::cout << "########################## TEST 1 ##########################" << std::endl; - // test1(); - // std::cout << "########################## TEST 2 ##########################" << std::endl; - // test2(); - // std::cout << "########################## TEST 3 ##########################" << std::endl; - // test3(); - // std::cout << "########################## TEST 4 ##########################" << std::endl; - // test4(); - std::cout << "########################## TEST 5 ##########################" << std::endl; - test5(); - return 0; -} +template class Octree; \ No newline at end of file From 2fc37a32fa516140d98fdf769b16b60ea67e8210 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 29 May 2020 16:37:53 +0100 Subject: [PATCH 420/653] Prevent memory leaks --- 
yt/utilities/lib/image_samplers.pyx | 3 ++- yt/utilities/lib/octree_raytracing.cpp | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 4791c213591..62e06be4c35 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -286,12 +286,13 @@ cdef class ImageSampler: for i in range(Nch): self.image[vi, vj, i] = idata.rgba[i] free(vc.data) + free(vc.mask) free(vc) idata.supp_data = NULL free(idata) # Free memory for j in range(size): - free(ret[j]) + del ret[j] free(ret) pass diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/octree_raytracing.cpp index 8b7b747853f..1f112c0e8f4 100644 --- a/yt/utilities/lib/octree_raytracing.cpp +++ b/yt/utilities/lib/octree_raytracing.cpp @@ -277,7 +277,7 @@ class Octree { if (child) { recursive_remove_node(child); } - free(child); + delete child; } free(node->children); } From e26ef97d3312eda827d45b8e751edf1128479c70 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Fri, 29 May 2020 17:55:19 +0100 Subject: [PATCH 421/653] Cast rays one by one --- yt/utilities/lib/cyoctree_raytracing.pxd | 4 +-- yt/utilities/lib/image_samplers.pyx | 38 ++++++++++++++---------- yt/utilities/lib/octree_raytracing.cpp | 16 ++++------ 3 files changed, 29 insertions(+), 29 deletions(-) diff --git a/yt/utilities/lib/cyoctree_raytracing.pxd b/yt/utilities/lib/cyoctree_raytracing.pxd index c45b1b27169..4bf2e41ff48 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pxd +++ b/yt/utilities/lib/cyoctree_raytracing.pxd @@ -20,10 +20,10 @@ cdef extern from "octree_raytracing.cpp": vector[T] keys vector[double] t - cdef cppclass Octree3D[T]: + cdef cppclass Octree3D[T] nogil: Octree3D(int depth, double* LE, double* RE) void insert_node_no_ret(const int* ipos, const int lvl, T key) - RayInfo[T]** cast_rays(const double* origins, const double* directions, const int Nrays) + void cast_ray(double* origins, double* directions, vector[T] keyList, vector[double] tList) cdef class CythonOctreeRayTracing: cdef Octree3D[int]* oct diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 62e06be4c35..c89891d7dd2 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -30,6 +30,8 @@ from .fixed_interpolator cimport \ offset_fill, \ vertex_interp +from yt.funcs import mylog + from .cyoctree_raytracing cimport CythonOctreeRayTracing, RayInfo cdef extern from "platform_dep.h": @@ -197,14 +199,12 @@ cdef class ImageSampler: @cython.wraparound(False) @cython.cdivision(True) def cast_through_octree(self, PartitionedGrid pg, CythonOctreeRayTracing oct, int num_threads = 0): - cdef RayInfo[int]** ret cdef RayInfo[int]* ri self.setup(pg) cdef sampler_function* sampler = self.sample cdef np.int64_t nx, ny, size - cdef np.int64_t iter[4] cdef VolumeContainer *vc cdef ImageAccumulator *idata @@ -212,14 +212,6 @@ cdef class ImageSampler: ny = self.nv[1] size = nx * ny - cdef np.float64_t* vp_pos_ptr = &self.vp_pos[0,0,0] - cdef np.float64_t* vp_dir_ptr = &self.vp_dir[0,0,0] - - ret = oct.oct.cast_rays(vp_pos_ptr, vp_dir_ptr, size) - - cdef int* cell_ind - cdef double* tval - cdef int i, j, k, vi, vj, icell cdef int[3] index = [0, 0, 0] cdef int chunksize = 100 @@ -228,10 +220,13 @@ cdef class ImageSampler: cdef np.float64_t vp_dir_len + mylog.debug('Integrating rays') with nogil, parallel(num_threads=num_threads): idata = malloc(sizeof(ImageAccumulator)) idata.supp_data = self.supp_data + ri = 
new RayInfo[int]() + vc = malloc(sizeof(VolumeContainer)) vc.n_fields = 1 vc.data = malloc(sizeof(np.float64_t*)) @@ -250,9 +245,12 @@ cdef class ImageSampler: self.vp_dir[vi, vj, 0]**2 + self.vp_dir[vi, vj, 1]**2 + self.vp_dir[vi, vj, 2]**2) + + # Cast ray + oct.oct.cast_ray(&self.vp_pos[vi, vj, 0], &self.vp_dir[vi, vj, 0], + ri.keys, ri.t) # Contains the ordered indices of the cells hit by the ray # and the entry/exit t values - ri = ret[j] if ri.keys.size() == 0: continue @@ -285,16 +283,24 @@ cdef class ImageSampler: ) for i in range(Nch): self.image[vi, vj, i] = idata.rgba[i] + + # Empty keys and t + ri.keys.clear() + ri.t.clear() + + del ri free(vc.data) free(vc.mask) free(vc) idata.supp_data = NULL free(idata) - # Free memory - for j in range(size): - del ret[j] - free(ret) - pass + + mylog.debug('Done integration') + # # Free memory + # for j in range(size): + # del ret[j] + # free(ret) + cdef void setup(self, PartitionedGrid pg): return diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/octree_raytracing.cpp index 1f112c0e8f4..5d4cc928e0a 100644 --- a/yt/utilities/lib/octree_raytracing.cpp +++ b/yt/utilities/lib/octree_raytracing.cpp @@ -52,17 +52,6 @@ struct Ray { F tmin = -1e99; F tmax = 1e99; - Ray(const std::array _o, const std::array _d, const F _tmin, const F _tmax) : o(_o), tmin(_tmin), tmax(_tmax) { - F dd = 0; - for (auto idim = 0; idim < Ndim; ++idim) { - dd += _d[idim] * _d[idim]; - } - dd = sqrt(dd); - for (auto idim = 0; idim < Ndim; ++idim) { - d[idim] = _d[idim] / dd; - } - }; - Ray(const F* _o, const F* _d, const F _tmin, const F _tmax) : tmin(_tmin), tmax(_tmax) { for (auto idim = 0; idim < Ndim; ++idim) { o[idim] = _o[idim]; @@ -240,6 +229,11 @@ class Octree { root, a, keyList, tList); } + void cast_ray(double* o, double* d, keyVector &keyList, std::vector &tList) { + Ray r(o, d, -1e99, 1e99); + cast_ray(&r, keyList, tList); + } + private: /* From dd52b0f8bc3d8790972c2dbe9e367696ff6f3252 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 8 Jun 2020 14:02:08 +0100 Subject: [PATCH 422/653] Help compiler with optimization --- .../lib/field_interpolation_tables.pxd | 18 ++++++++++-------- 1 file changed, 10 insertions(+), 8 deletions(-) diff --git a/yt/utilities/lib/field_interpolation_tables.pxd b/yt/utilities/lib/field_interpolation_tables.pxd index 7ff9814b8a2..07b11128e07 100644 --- a/yt/utilities/lib/field_interpolation_tables.pxd +++ b/yt/utilities/lib/field_interpolation_tables.pxd @@ -51,7 +51,7 @@ cdef inline void FIT_initialize_table(FieldInterpolationTable *fit, int nbins, @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) -cdef inline np.float64_t FIT_get_value(FieldInterpolationTable *fit, +cdef inline np.float64_t FIT_get_value(const FieldInterpolationTable *fit, np.float64_t dvs[6]) nogil: cdef np.float64_t dd, dout cdef int bin_id @@ -69,25 +69,27 @@ cdef inline np.float64_t FIT_get_value(FieldInterpolationTable *fit, @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) -cdef inline void FIT_eval_transfer(np.float64_t dt, np.float64_t *dvs, - np.float64_t *rgba, int n_fits, - FieldInterpolationTable fits[6], - int field_table_ids[6], int grey_opacity) nogil: +cdef inline void FIT_eval_transfer( + const np.float64_t dt, np.float64_t *dvs, + np.float64_t *rgba, const int n_fits, + const FieldInterpolationTable fits[6], + const int field_table_ids[6], const int grey_opacity) nogil: cdef int i, fid cdef np.float64_t ta cdef np.float64_t istorage[6] cdef np.float64_t trgba[6] - 
for i in range(6): istorage[i] = 0.0 + # for i in range(6): istorage[i] = 0.0 for i in range(n_fits): istorage[i] = FIT_get_value(&fits[i], dvs) for i in range(n_fits): fid = fits[i].weight_table_id - if fid != -1: istorage[i] *= istorage[fid] + if fid != -1: + istorage[i] *= istorage[fid] for i in range(6): trgba[i] = istorage[field_table_ids[i]] if grey_opacity == 1: - ta = fmax(1.0 - dt*trgba[3],0.0) + ta = fmax(1.0 - dt*trgba[3], 0.0) for i in range(4): rgba[i] = dt*trgba[i] + ta*rgba[i] else: From 9d54a831c235871a7093d4dfc66b8cc3ab55309c Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 8 Jun 2020 14:37:11 +0100 Subject: [PATCH 423/653] Minor help for the compiler --- yt/utilities/lib/field_interpolation_tables.pxd | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/utilities/lib/field_interpolation_tables.pxd b/yt/utilities/lib/field_interpolation_tables.pxd index 07b11128e07..98279f068c2 100644 --- a/yt/utilities/lib/field_interpolation_tables.pxd +++ b/yt/utilities/lib/field_interpolation_tables.pxd @@ -62,8 +62,9 @@ cdef inline np.float64_t FIT_get_value(const FieldInterpolationTable *fit, dd = dvs[fit.field_id] - fit.d0[bin_id] # x - x0 dout = fit.values[bin_id] + dd * fit.dy[bin_id] - if fit.weight_field_id != -1: - dout *= dvs[fit.weight_field_id] + cdef int wfi = fit.weight_field_id + if wfi != -1: + dout *= dvs[wfi] return dout @cython.boundscheck(False) From a86d3000bbb2dce67ddd836915fd986a55bab4f7 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 1 Jul 2020 12:29:16 +0100 Subject: [PATCH 424/653] Quick fixup --- yt/visualization/volume_rendering/api.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/visualization/volume_rendering/api.py b/yt/visualization/volume_rendering/api.py index fcc98be62c6..71100d15760 100644 --- a/yt/visualization/volume_rendering/api.py +++ b/yt/visualization/volume_rendering/api.py @@ -11,6 +11,7 @@ OpaqueSource, PointSource, VolumeSource, + create_volume_source, ) from .scene import Scene from .transfer_function_helper import TransferFunctionHelper From d044c4a820ca5b2ee338e0b9974ec85193951193 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 21:04:01 +0100 Subject: [PATCH 425/653] Fixed interpolator is now cpp --- setup.py | 2 +- yt/utilities/lib/cyoctree_raytracing.pxd | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/setup.py b/setup.py index f9492b13a2a..3ebf13b17b4 100644 --- a/setup.py +++ b/setup.py @@ -56,7 +56,7 @@ ], "STD_LIBS": std_libs, "OMP_ARGS": omp_args, - "FIXED_INTERP": "yt/utilities/lib/fixed_interpolator.c", + "FIXED_INTERP": "yt/utilities/lib/fixed_interpolator.cpp", "ARTIO_SOURCE": glob.glob("yt/frontends/artio/artio_headers/*.c"), } diff --git a/yt/utilities/lib/cyoctree_raytracing.pxd b/yt/utilities/lib/cyoctree_raytracing.pxd index 4bf2e41ff48..a863d9e2e41 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pxd +++ b/yt/utilities/lib/cyoctree_raytracing.pxd @@ -1,3 +1,5 @@ +# distutils: language = c++ +# distutils: extra_compile_args=["-std=c++11"] """This is a wrapper around the C++ class to efficiently cast rays into an octree. It relies on the seminal paper by J. Revelles,, C.Ureña and M.Lastra. 
""" From ca5a44ff9c5c6e52ec7832cb816e63dab034a5aa Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 2 Jul 2020 21:12:36 +0100 Subject: [PATCH 426/653] Fixing c++ issues --- yt/utilities/lib/cyoctree_raytracing.pxd | 2 -- yt/utilities/lib/cyoctree_raytracing.pyx | 2 ++ yt/utilities/lib/image_samplers.pyx | 1 + yt/utilities/lib/marching_cubes.pyx | 1 + yt/utilities/lib/partitioned_grid.pyx | 1 + 5 files changed, 5 insertions(+), 2 deletions(-) diff --git a/yt/utilities/lib/cyoctree_raytracing.pxd b/yt/utilities/lib/cyoctree_raytracing.pxd index a863d9e2e41..4bf2e41ff48 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pxd +++ b/yt/utilities/lib/cyoctree_raytracing.pxd @@ -1,5 +1,3 @@ -# distutils: language = c++ -# distutils: extra_compile_args=["-std=c++11"] """This is a wrapper around the C++ class to efficiently cast rays into an octree. It relies on the seminal paper by J. Revelles,, C.Ureña and M.Lastra. """ diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx index c38e2d55809..8daa9013dcf 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -1,3 +1,5 @@ +# distutils: language = c++ +# distutils: extra_compile_args=["-std=c++11"] """This is a wrapper around the C++ class to efficiently cast rays into an octree. It relies on the seminal paper by J. Revelles,, C.Ureña and M.Lastra. """ diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index c89891d7dd2..75ba5a73608 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -3,6 +3,7 @@ # distutils: extra_link_args = OMP_ARGS # distutils: libraries = STD_LIBS # distutils: sources = FIXED_INTERP +# distutils: language = c++ """ Image sampler definitions diff --git a/yt/utilities/lib/marching_cubes.pyx b/yt/utilities/lib/marching_cubes.pyx index 1dd8d028ef9..01363b74064 100644 --- a/yt/utilities/lib/marching_cubes.pyx +++ b/yt/utilities/lib/marching_cubes.pyx @@ -1,6 +1,7 @@ # distutils: include_dirs = LIB_DIR # distutils: libraries = STD_LIBS # distutils: sources = FIXED_INTERP +# distutils: language = c++ """ Marching cubes implementation diff --git a/yt/utilities/lib/partitioned_grid.pyx b/yt/utilities/lib/partitioned_grid.pyx index 0ed89f497db..1c99747e121 100644 --- a/yt/utilities/lib/partitioned_grid.pyx +++ b/yt/utilities/lib/partitioned_grid.pyx @@ -1,6 +1,7 @@ # distutils: sources = FIXED_INTERP # distutils: include_dirs = LIB_DIR # distutils: libraries = STD_LIBS +# distutils: language = c++ """ Image sampler definitions From ba3e7f7eb1fb8a9def1b16a23e766f70c4de2788 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 10:42:28 +0100 Subject: [PATCH 427/653] Black/isort passes --- yt/data_objects/octree_subset.py | 21 ++++++------ yt/frontends/ramses/data_structures.py | 22 ++++++++++--- yt/utilities/lib/pyoctree_raytracing.py | 43 ++++++++++++++++--------- 3 files changed, 55 insertions(+), 31 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index e2a609f4792..092152f4454 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -526,8 +526,8 @@ def get_vertex_centered_data(self, fields): _old_api = isinstance(fields, (str, tuple)) if _old_api: message = ( - 'get_vertex_centered_data() requires list of fields, rather than ' - 'a single field as an argument.' + "get_vertex_centered_data() requires list of fields, rather than " + "a single field as an argument." 
) warnings.warn(message, DeprecationWarning, stacklevel=2) fields = [fields] @@ -537,20 +537,21 @@ def get_vertex_centered_data(self, fields): new_fields = {} cg = self.retrieve_ghost_zones(1, fields) for field in fields: - new_fields[field] = cg[field][1: ,1: ,1: ].copy() - np.add(new_fields[field], cg[field][:-1,1: ,1: ], new_fields[field]) - np.add(new_fields[field], cg[field][1: ,:-1,1: ], new_fields[field]) - np.add(new_fields[field], cg[field][1: ,1: ,:-1], new_fields[field]) - np.add(new_fields[field], cg[field][:-1,1: ,:-1], new_fields[field]) - np.add(new_fields[field], cg[field][1: ,:-1,:-1], new_fields[field]) - np.add(new_fields[field], cg[field][:-1,:-1,1: ], new_fields[field]) - np.add(new_fields[field], cg[field][:-1,:-1,:-1], new_fields[field]) + new_fields[field] = cg[field][1:, 1:, 1:].copy() + np.add(new_fields[field], cg[field][:-1, 1:, 1:], new_fields[field]) + np.add(new_fields[field], cg[field][1:, :-1, 1:], new_fields[field]) + np.add(new_fields[field], cg[field][1:, 1:, :-1], new_fields[field]) + np.add(new_fields[field], cg[field][:-1, 1:, :-1], new_fields[field]) + np.add(new_fields[field], cg[field][1:, :-1, :-1], new_fields[field]) + np.add(new_fields[field], cg[field][:-1, :-1, 1:], new_fields[field]) + np.add(new_fields[field], cg[field][:-1, :-1, :-1], new_fields[field]) np.multiply(new_fields[field], 0.125, new_fields[field]) if _old_api: return new_fields[fields[0]] return new_fields + class OctreeSubsetBlockSlicePosition: def __init__(self, ind, block_slice): self.ind = ind diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index ee3b9587ed5..d9cc42c3c74 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -280,8 +280,14 @@ def _fill_with_ghostzones( if gz_cache: levels, cell_inds, file_inds, domains = gz_cache else: - gz_cache = levels, cell_inds, file_inds, domains = self.oct_handler.file_index_octs_with_ghost_zones( - selector, self.domain_id, cell_count) + gz_cache = ( + levels, + cell_inds, + file_inds, + domains, + ) = self.oct_handler.file_index_octs_with_ghost_zones( + selector, self.domain_id, cell_count + ) self._ghost_zone_cache = gz_cache # Initializing data container @@ -348,13 +354,19 @@ def fill(self, fd, fields, selector, file_handler): ) def retrieve_ghost_zones(self, ngz, fields, smoothed=False): - new_subset = getattr(self, '_subset_with_gz', None) + new_subset = getattr(self, "_subset_with_gz", None) if not new_subset: new_subset = RAMSESDomainSubset( - self.base_region, self.domain, self.ds, num_ghost_zones=ngz, base_grid=self + self.base_region, + self.domain, + self.ds, + num_ghost_zones=ngz, + base_grid=self, ) else: - mylog.debug('Reusing previous subset with ghost zone for domain %s' % self.domain_id) + mylog.debug( + "Reusing previous subset with ghost zone for domain %s" % self.domain_id + ) self._subset_with_gz = new_subset return new_subset diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index b20d48dac9f..735b18f6bd0 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -1,7 +1,9 @@ -from yt.utilities.lib.cyoctree_raytracing import CythonOctreeRayTracing +from itertools import product import numpy as np -from itertools import product + +from yt.utilities.lib.cyoctree_raytracing import CythonOctreeRayTracing + class OctreeRayTracing(object): octree = None @@ -17,16 +19,22 @@ def __init__(self, data_source): self.data_source = data_source LE = 
np.array([0, 0, 0], dtype=np.float64) RE = np.array([1, 1, 1], dtype=np.float64) - depth = data_source.ds.parameters['levelmax'] + depth = data_source.ds.parameters["levelmax"] self.octree = CythonOctreeRayTracing(LE, RE, depth) ds = data_source.ds - xyz = np.stack([data_source[_].to('unitary').value for _ in 'x y z'.split()], axis=-1) - lvl = data_source['grid_level'].astype(int).value + ds.parameters['levelmin'] + xyz = np.stack( + [data_source[_].to("unitary").value for _ in "x y z".split()], axis=-1 + ) + lvl = data_source["grid_level"].astype(int).value + ds.parameters["levelmin"] - ipos = np.floor(xyz * (1<<(ds.parameters['levelmax']))).astype(int) - self.octree.add_nodes(ipos.astype(np.int32), lvl.astype(np.int32), np.arange(len(ipos), dtype=np.int32)) + ipos = np.floor(xyz * (1 << (ds.parameters["levelmax"]))).astype(int) + self.octree.add_nodes( + ipos.astype(np.int32), + lvl.astype(np.int32), + np.arange(len(ipos), dtype=np.int32), + ) def vertex_centered_data(self, field): data_source = self.data_source @@ -34,8 +42,10 @@ def vertex_centered_data(self, field): finfo = data_source.ds._get_field_info(*field) units = finfo.units - rv = data_source.ds.arr(np.zeros((2, 2, 2, data_source.ires.size), dtype="float64"), units) - ind = {(i, j, k): 0 for i, j, k in product(*[range(2)]*3)} + rv = data_source.ds.arr( + np.zeros((2, 2, 2, data_source.ires.size), dtype="float64"), units + ) + ind = {(i, j, k): 0 for i, j, k in product(*[range(2)] * 3)} for chunk in chunks: with data_source._chunked_read(chunk): gz = data_source._current_chunk.objs[0] @@ -43,16 +53,18 @@ def vertex_centered_data(self, field): wogz = gz._base_grid vertex_data = gz.get_vertex_centered_data([field])[field] - for i, j, k in product(*[range(2)]*3): + for i, j, k in product(*[range(2)] * 3): ind[i, j, k] += wogz.select( data_source.selector, - vertex_data[i:i+2, j:j+2, k:k+2, ...], - rv[i, j, k, :], ind[i, j, k]) + vertex_data[i : i + 2, j : j + 2, k : k + 2, ...], + rv[i, j, k, :], + ind[i, j, k], + ) return rv def set_fields(self, fields, log_fields, no_ghost, force=False): if no_ghost: - raise NotImplementedError('Ghost zones are required with Octree datasets') + raise NotImplementedError("Ghost zones are required with Octree datasets") assert len(fields) == 1 field = self.data_source._determine_fields(fields)[0] @@ -63,7 +75,7 @@ def set_fields(self, fields, log_fields, no_ghost, force=False): vertex_data = np.log10(vertex_data) # Vertex_data has shape (2, 2, 2, ...) - # Note: here we have the wrong ordering within the oct (classical Fortran/C + # Note: here we have the wrong ordering within the oct (classical Fortran/C # ordering issue) so we need to swap axis 0 and 2. 
self.data = vertex_data.swapaxes(0, 2).reshape(8, -1) @@ -85,6 +97,5 @@ def cast_rays(self, vp_pos, vp_dir): """ if not self._cell_index: # TODO: cache indices of cells - self._cell_index, self._tvalues = \ - self.octree.cast_rays(vp_pos, vp_dir) + self._cell_index, self._tvalues = self.octree.cast_rays(vp_pos, vp_dir) return self._cell_index, self._tvalues From c35a5b4cf57f620a7964cf8a9d68ce8e7db972be Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 10:58:11 +0100 Subject: [PATCH 428/653] Minor changes Do not format debug statements by default + remove useless comment --- yt/frontends/ramses/data_structures.py | 3 ++- yt/utilities/lib/field_interpolation_tables.pxd | 1 - 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index d9cc42c3c74..a88700fb86e 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -365,7 +365,8 @@ def retrieve_ghost_zones(self, ngz, fields, smoothed=False): ) else: mylog.debug( - "Reusing previous subset with ghost zone for domain %s" % self.domain_id + "Reusing previous subset with ghost zone for domain %s", + self.domain_id ) self._subset_with_gz = new_subset diff --git a/yt/utilities/lib/field_interpolation_tables.pxd b/yt/utilities/lib/field_interpolation_tables.pxd index 98279f068c2..59f649c8f05 100644 --- a/yt/utilities/lib/field_interpolation_tables.pxd +++ b/yt/utilities/lib/field_interpolation_tables.pxd @@ -79,7 +79,6 @@ cdef inline void FIT_eval_transfer( cdef np.float64_t ta cdef np.float64_t istorage[6] cdef np.float64_t trgba[6] - # for i in range(6): istorage[i] = 0.0 for i in range(n_fits): istorage[i] = FIT_get_value(&fits[i], dvs) for i in range(n_fits): From 5ab7000243d256caf4e449cc94f28a5963c864ca Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 10:59:22 +0100 Subject: [PATCH 429/653] Fix flaking --- yt/visualization/volume_rendering/api.py | 1 - yt/visualization/volume_rendering/volume_rendering.py | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/yt/visualization/volume_rendering/api.py b/yt/visualization/volume_rendering/api.py index 71100d15760..6339a35f5b6 100644 --- a/yt/visualization/volume_rendering/api.py +++ b/yt/visualization/volume_rendering/api.py @@ -10,7 +10,6 @@ MeshSource, OpaqueSource, PointSource, - VolumeSource, create_volume_source, ) from .scene import Scene diff --git a/yt/visualization/volume_rendering/volume_rendering.py b/yt/visualization/volume_rendering/volume_rendering.py index 99fe4de2b28..10d4a611f38 100644 --- a/yt/visualization/volume_rendering/volume_rendering.py +++ b/yt/visualization/volume_rendering/volume_rendering.py @@ -1,7 +1,7 @@ from yt.funcs import mylog from yt.utilities.exceptions import YTSceneFieldNotFound -from .render_source import MeshSource, VolumeSource, create_volume_source +from .render_source import MeshSource, create_volume_source from .scene import Scene from .utils import data_source_or_all From a8625ba5f34ea6e2f9b1a45e6e7148340f857764 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 11:09:21 +0100 Subject: [PATCH 430/653] More flexible create_volume_source function --- yt/visualization/volume_rendering/render_source.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index d4463b57714..3182dd78095 100644 --- 
a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -4,6 +4,7 @@ from yt.config import ytcfg from yt.data_objects.image_array import ImageArray +from yt.data_objects.static_output import Dataset from yt.funcs import ensure_numpy_array, iterable, mylog from yt.geometry.grid_geometry_handler import GridIndex from yt.geometry.oct_geometry_handler import OctreeIndex @@ -119,7 +120,11 @@ def set_zbuffer(self, zbuffer): def create_volume_source(data_source, field): - index_class = data_source.ds.index.__class__ + if isinstance(data_source, Dataset): + ds = data_source + else: + ds = data_source.ds + index_class = ds.index.__class__ if issubclass(index_class, GridIndex): return KDTreeVolumeSource(data_source, field) elif issubclass(index_class, OctreeIndex): From f0e74987f16074d133fc968459678b7d166ad726 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 11:13:18 +0100 Subject: [PATCH 431/653] Test should pass if they agree at machine precision --- yt/visualization/volume_rendering/tests/test_varia.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index 9580e2cc0f5..4c822614e78 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -95,7 +95,8 @@ def test_lazy_volume_source_construction(self): ad = self.ds.all_data() - assert source.transfer_function.x_bounds == list( + np.testing.assert_allclose( + source.transfer_function.x_bounds, np.log10(ad.quantities.extrema("density")) ) assert source.tfh.log == source.log_field From 114e534cb9bbff2f3ee76be74ad3a3c48c3d0b22 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 11:19:34 +0100 Subject: [PATCH 432/653] Black is happy --- yt/frontends/ramses/data_structures.py | 3 +-- yt/visualization/volume_rendering/tests/test_varia.py | 2 +- 2 files changed, 2 insertions(+), 3 deletions(-) diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index a88700fb86e..17621ae35a8 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -365,8 +365,7 @@ def retrieve_ghost_zones(self, ngz, fields, smoothed=False): ) else: mylog.debug( - "Reusing previous subset with ghost zone for domain %s", - self.domain_id + "Reusing previous subset with ghost zone for domain %s", self.domain_id ) self._subset_with_gz = new_subset diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index 4c822614e78..3d6ba9bd58f 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -97,7 +97,7 @@ def test_lazy_volume_source_construction(self): np.testing.assert_allclose( source.transfer_function.x_bounds, - np.log10(ad.quantities.extrema("density")) + np.log10(ad.quantities.extrema("density")), ) assert source.tfh.log == source.log_field From 370b83b2bdef5794770e95f7b7fa981d1b783305 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 17:13:52 +0100 Subject: [PATCH 433/653] Include comments --- yt/data_objects/octree_subset.py | 5 ++--- yt/frontends/ramses/data_structures.py | 14 +++++++------- yt/utilities/lib/image_samplers.pyx | 8 +++----- yt/utilities/lib/pyoctree_raytracing.py | 4 +--- yt/visualization/volume_rendering/render_source.py | 11 +++++++---- 5 
files changed, 20 insertions(+), 22 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 092152f4454..2b68281f31c 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -1,4 +1,3 @@ -import warnings from contextlib import contextmanager import numpy as np @@ -6,7 +5,7 @@ import yt.geometry.particle_deposit as particle_deposit import yt.geometry.particle_smooth as particle_smooth from yt.data_objects.data_containers import YTSelectionContainer -from yt.funcs import mylog +from yt.funcs import mylog, issue_deprecation_warning from yt.geometry.particle_oct_container import ParticleOctreeContainer from yt.units.dimensions import length from yt.units.yt_array import YTArray @@ -529,7 +528,7 @@ def get_vertex_centered_data(self, fields): "get_vertex_centered_data() requires list of fields, rather than " "a single field as an argument." ) - warnings.warn(message, DeprecationWarning, stacklevel=2) + issue_deprecation_warning(message) fields = [fields] # Make sure the field list has only unique entries diff --git a/yt/frontends/ramses/data_structures.py b/yt/frontends/ramses/data_structures.py index 17621ae35a8..4eef8dbfe20 100644 --- a/yt/frontends/ramses/data_structures.py +++ b/yt/frontends/ramses/data_structures.py @@ -354,8 +354,12 @@ def fill(self, fd, fields, selector, file_handler): ) def retrieve_ghost_zones(self, ngz, fields, smoothed=False): - new_subset = getattr(self, "_subset_with_gz", None) - if not new_subset: + try: + new_subset = self._subset_with_gz + mylog.debug( + "Reusing previous subset with ghost zone for domain %s", self.domain_id + ) + except AttributeError: new_subset = RAMSESDomainSubset( self.base_region, self.domain, @@ -363,11 +367,7 @@ def retrieve_ghost_zones(self, ngz, fields, smoothed=False): num_ghost_zones=ngz, base_grid=self, ) - else: - mylog.debug( - "Reusing previous subset with ghost zone for domain %s", self.domain_id - ) - self._subset_with_gz = new_subset + self._subset_with_gz = new_subset return new_subset diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 75ba5a73608..a9c344704dd 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -73,7 +73,7 @@ cdef class ImageSampler: cdef int i self.volume_method = kwargs.pop('volume_method', None) - if self.volume_method and self.volume_method not in ('KDTree', 'Octree'): + if self.volume_method not in ('KDTree', 'Octree'): raise NotImplementedError( 'Invalid volume method "%s".' 
% self.svolume_method) camera_data = kwargs.pop("camera_data", None) @@ -139,6 +139,8 @@ cdef class ImageSampler: return self.cast_through_kdtree(pg, **kwa) elif self.volume_method == 'Octree': return self.cast_through_octree(pg, **kwa) + else: + raise NotImplementedError @cython.boundscheck(False) @cython.wraparound(False) @@ -297,10 +299,6 @@ cdef class ImageSampler: free(idata) mylog.debug('Done integration') - # # Free memory - # for j in range(size): - # del ret[j] - # free(ret) cdef void setup(self, PartitionedGrid pg): diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 735b18f6bd0..9bb1af1af13 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -24,9 +24,7 @@ def __init__(self, data_source): self.octree = CythonOctreeRayTracing(LE, RE, depth) ds = data_source.ds - xyz = np.stack( - [data_source[_].to("unitary").value for _ in "x y z".split()], axis=-1 - ) + xyz = np.stack([data_source[key].to("unitary").value for key in "xyz"], axis=-1) lvl = data_source["grid_level"].astype(int).value + ds.parameters["levelmin"] ipos = np.floor(xyz * (1 << (ds.parameters["levelmax"]))).astype(int) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 3182dd78095..65580a211e6 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -129,6 +129,8 @@ def create_volume_source(data_source, field): return KDTreeVolumeSource(data_source, field) elif issubclass(index_class, OctreeIndex): return OctreeVolumeSource(data_source, field) + else: + raise NotImplementedError class VolumeSource(RenderSource): @@ -567,9 +569,7 @@ def _get_volume(self): data = self.data_source ds = data.ds - xyz = np.stack( - [data[_].to("unitary").value for _ in "x y z".split()], axis=-1 - ) + xyz = np.stack([data[key].to("unitary").value for key in "xyz"], axis=-1) lvl = data["grid_level"].astype(np.int32).value + ds.parameters["levelmin"] ipos = np.floor(xyz * (1 << (ds.parameters["levelmax"]))).astype(np.int32) @@ -601,7 +601,10 @@ def render(self, camera, zbuffer=None): """ self.zbuffer = zbuffer self.set_sampler(camera) - assert self.sampler is not None + if self.sampler is None: + raise RuntimeError( + "No sampler set. This is likely a bug as it should never happen." + ) data = self.data_source From 8c4ef1f30fab1cc0457f8ebe82e61cce9a449f51 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:14:26 +0200 Subject: [PATCH 434/653] Pass exception upwards --- yt/geometry/oct_container.pxd | 2 +- yt/geometry/oct_container.pyx | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/geometry/oct_container.pxd b/yt/geometry/oct_container.pxd index 75ada2b97e7..446d4f8087a 100644 --- a/yt/geometry/oct_container.pxd +++ b/yt/geometry/oct_container.pxd @@ -74,7 +74,7 @@ cdef class OctreeContainer: OctVisitor visitor, int vc = ?, np.int64_t *indices = ?) cdef Oct *next_root(self, int domain_id, int ind[3]) - cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent) + cdef Oct *next_child(self, int domain_id, int ind[3], Oct *parent) except? NULL cdef void append_domain(self, np.int64_t domain_count) # The fill_style is the ordering, C or F, of the octs in the file. "o" # corresponds to C, and "r" is for Fortran. 
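The "except? NULL" annotation added to next_child above is what actually lets the exception travel upwards: for a cdef function returning a C pointer, Cython would otherwise print and discard any Python exception raised inside it, whereas with "except? NULL" a NULL return value makes the caller check for a pending exception and re-raise it. A minimal sketch of the pattern (illustrative only; find_child is a hypothetical helper, not part of this patch):

    cdef Oct* find_child(Oct* parent, int i) except? NULL:
        # Without ``except? NULL`` this error would be ignored by cdef callers;
        # with it, returning NULL triggers an exception check in the caller.
        if parent.children == NULL:
            raise RuntimeError("node has no children")
        return parent.children[i]
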
diff --git a/yt/geometry/oct_container.pyx b/yt/geometry/oct_container.pyx index 5d9a89a842f..dedd12e2369 100644 --- a/yt/geometry/oct_container.pyx +++ b/yt/geometry/oct_container.pyx @@ -643,7 +643,7 @@ cdef class OctreeContainer: self.nocts += 1 return next - cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent): + cdef Oct* next_child(self, int domain_id, int ind[3], Oct *parent) except? NULL: cdef int i cdef Oct *next = NULL if parent.children != NULL: From 227b7e5f722419d6c008bb6b49204f1c27ea4a85 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:16:26 +0200 Subject: [PATCH 435/653] Add valueerror instead of assertion --- yt/utilities/lib/pyoctree_raytracing.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 9bb1af1af13..ee04f619114 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -64,7 +64,12 @@ def set_fields(self, fields, log_fields, no_ghost, force=False): if no_ghost: raise NotImplementedError("Ghost zones are required with Octree datasets") - assert len(fields) == 1 + if len(fields) != 1: + raise ValueError( + 'Can only set one fields at a time. ' + 'This is likely a bug, and should be reported.' + ) + field = self.data_source._determine_fields(fields)[0] take_log = log_fields[0] vertex_data = self.vertex_centered_data(field) From 87ad009017c521f8885ac8b6b32d7d598e34ee00 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:17:30 +0200 Subject: [PATCH 436/653] Remove TODO that's been done --- yt/utilities/lib/pyoctree_raytracing.py | 1 - 1 file changed, 1 deletion(-) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index ee04f619114..89ac3b30c0b 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -99,6 +99,5 @@ def cast_rays(self, vp_pos, vp_dir): The t value at entry and exit for each cell. 
""" if not self._cell_index: - # TODO: cache indices of cells self._cell_index, self._tvalues = self.octree.cast_rays(vp_pos, vp_dir) return self._cell_index, self._tvalues From a5ad26ca10ab6076ca58296491e44d75711ddf46 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:23:37 +0200 Subject: [PATCH 437/653] Use ABC instead of manually raising NotImplementedError --- .../volume_rendering/render_source.py | 17 +++++++++++++++-- 1 file changed, 15 insertions(+), 2 deletions(-) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 65580a211e6..6eaaa34fdf7 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -1,6 +1,7 @@ from functools import wraps import numpy as np +import abc from yt.config import ytcfg from yt.data_objects.image_array import ImageArray @@ -133,7 +134,7 @@ def create_volume_source(data_source, field): raise NotImplementedError -class VolumeSource(RenderSource): +class VolumeSource(RenderSource, abc.ABC): """A class for rendering data from a volumetric data source Examples of such sources include a sphere, cylinder, or the @@ -433,6 +434,18 @@ def set_sampler(self, camera, interpolated=True): self.sampler = sampler assert self.sampler is not None + + @abc.abstractmethod + def _get_volume(self): + """The abstract volume associated with this VolumeSource + + This object does the heavy lifting to access data in an efficient manner + using a KDTree + """ + pass + + + @abc.abstractmethod @validate_volume def render(self, camera, zbuffer=None): """Renders an image using the provided camera @@ -453,7 +466,7 @@ def render(self, camera, zbuffer=None): the rendered image. """ - raise NotImplementedError() + pass def finalize_image(self, camera, image): """Parallel reduce the image. From 71d84664a9a497f7438a11eedca4cfb2360fbf47 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:26:03 +0200 Subject: [PATCH 438/653] isort-ing --- yt/visualization/volume_rendering/render_source.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 6eaaa34fdf7..1e9a7eabef2 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -1,7 +1,7 @@ +import abc from functools import wraps import numpy as np -import abc from yt.config import ytcfg from yt.data_objects.image_array import ImageArray From 65a405d07aa7bf769641cfd29db215b0f12befaa Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:26:51 +0200 Subject: [PATCH 439/653] Black pass --- yt/utilities/lib/pyoctree_raytracing.py | 4 ++-- yt/visualization/volume_rendering/render_source.py | 2 -- 2 files changed, 2 insertions(+), 4 deletions(-) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 89ac3b30c0b..232def6acfc 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -66,8 +66,8 @@ def set_fields(self, fields, log_fields, no_ghost, force=False): if len(fields) != 1: raise ValueError( - 'Can only set one fields at a time. ' - 'This is likely a bug, and should be reported.' + "Can only set one fields at a time. " + "This is likely a bug, and should be reported." 
) field = self.data_source._determine_fields(fields)[0] diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 1e9a7eabef2..fbb77d9c665 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -434,7 +434,6 @@ def set_sampler(self, camera, interpolated=True): self.sampler = sampler assert self.sampler is not None - @abc.abstractmethod def _get_volume(self): """The abstract volume associated with this VolumeSource @@ -444,7 +443,6 @@ def _get_volume(self): """ pass - @abc.abstractmethod @validate_volume def render(self, camera, zbuffer=None): From 8429979687def6fe6d31edd2cb1cadf526109ad9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 17:20:05 +0100 Subject: [PATCH 440/653] Update yt/visualization/volume_rendering/render_source.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/visualization/volume_rendering/render_source.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index fbb77d9c665..2456ec6a3d0 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -479,7 +479,7 @@ def finalize_image(self, camera, image): image.shape = camera.resolution[0], camera.resolution[1], 4 # If the call is from VR, the image is rotated by 180 to get correct # up direction - if self.transfer_function.grey_opacity is False: + if not self.transfer_function.grey_opacity: image[:, :, 3] = 1 return image From 53ad67b4cbddfefb94971658c8ba8957f2dc3faa Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 20 Jul 2020 17:24:21 +0100 Subject: [PATCH 441/653] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/utilities/lib/pyoctree_raytracing.py | 3 ++- yt/visualization/volume_rendering/render_source.py | 6 +++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index 232def6acfc..d8fc9e4f64f 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -43,7 +43,8 @@ def vertex_centered_data(self, field): rv = data_source.ds.arr( np.zeros((2, 2, 2, data_source.ires.size), dtype="float64"), units ) - ind = {(i, j, k): 0 for i, j, k in product(*[range(2)] * 3)} + binary_3D_index_iter = product(*[range(2)] * 3) + ind = {(i, j, k): 0 for i, j, k in binary_3D_index_iter} for chunk in chunks: with data_source._chunked_read(chunk): gz = data_source._current_chunk.objs[0] diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 2456ec6a3d0..ec63bdc4d68 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -625,11 +625,11 @@ def render(self, camera, zbuffer=None): RE = xyz + dx / 2 mylog.debug("Gathering data") - dt = np.stack([_ for _ in self.volume.data] + [*LE.T, *RE.T], axis=-1).reshape( + dt = np.stack(list(self.volume.data) + [*LE.T, *RE.T], axis=-1).reshape( 1, len(dx), 14, 1 ) - mask = np.full_like(dt[0, ...], 1, dtype=np.uint8) - dims = np.array([1, 1, 1]) + mask = np.full(dt.shape[1:], 1, dtype=np.uint8) + dims = 
np.array([1, 1, 1], dtype=int) pg = PartitionedGrid(0, dt, mask, LE.flatten(), RE.flatten(), dims, n_fields=1) mylog.debug("Casting rays") From 9e1dfd8e8b0c75c8c1dd876a0882f34714563a76 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 22 Jul 2020 16:23:06 +0100 Subject: [PATCH 442/653] Fix isort --- yt/data_objects/octree_subset.py | 2 +- yt/utilities/lib/cyoctree_raytracing.pyx | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 2b68281f31c..86394d9a14f 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -5,7 +5,7 @@ import yt.geometry.particle_deposit as particle_deposit import yt.geometry.particle_smooth as particle_smooth from yt.data_objects.data_containers import YTSelectionContainer -from yt.funcs import mylog, issue_deprecation_warning +from yt.funcs import issue_deprecation_warning, mylog from yt.geometry.particle_oct_container import ParticleOctreeContainer from yt.units.dimensions import length from yt.units.yt_array import YTArray diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/cyoctree_raytracing.pyx index 8daa9013dcf..761be09322e 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/cyoctree_raytracing.pyx @@ -6,16 +6,20 @@ It relies on the seminal paper by J. Revelles,, C.Ureña and M.Lastra. cimport numpy as np + import numpy as np -from libcpp.vector cimport vector + cimport cython -from cython.parallel import prange, parallel +from libcpp.vector cimport vector + +from cython.parallel import parallel, prange + from libc.stdlib cimport free, malloc -from .image_samplers cimport ImageAccumulator, ImageSampler from .grid_traversal cimport sampler_function -from .volume_container cimport VolumeContainer +from .image_samplers cimport ImageAccumulator, ImageSampler from .partitioned_grid cimport PartitionedGrid +from .volume_container cimport VolumeContainer DEF Nch = 4 From b54094dcffc50001d90179004bfb46ddd4f141ae Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:37:08 +0200 Subject: [PATCH 443/653] Fix mistake in image samplers --- yt/utilities/lib/image_samplers.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index a9c344704dd..cbd7c9c30c2 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -75,7 +75,7 @@ cdef class ImageSampler: self.volume_method = kwargs.pop('volume_method', None) if self.volume_method not in ('KDTree', 'Octree'): raise NotImplementedError( - 'Invalid volume method "%s".' % self.svolume_method) + 'Invalid volume method "%s".' 
% self.volume_method) camera_data = kwargs.pop("camera_data", None) if camera_data is not None: self.camera_data = camera_data From 2983a1646b9bd8290e88f65266153edc1d24e28f Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Mon, 27 Jul 2020 23:44:51 +0200 Subject: [PATCH 444/653] Remove erroneous exception raising --- yt/utilities/lib/image_samplers.pyx | 3 --- 1 file changed, 3 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index cbd7c9c30c2..2e26c349b56 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -73,9 +73,6 @@ cdef class ImageSampler: cdef int i self.volume_method = kwargs.pop('volume_method', None) - if self.volume_method not in ('KDTree', 'Octree'): - raise NotImplementedError( - 'Invalid volume method "%s".' % self.volume_method) camera_data = kwargs.pop("camera_data", None) if camera_data is not None: self.camera_data = camera_data From ab6fabf3a5c1e5e41f95c8deb471cf8abfb3342a Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 28 Jul 2020 00:12:29 +0200 Subject: [PATCH 445/653] Specify volume method --- .../volume_rendering/old_camera.py | 26 ++++++++++++++----- yt/visualization/volume_rendering/utils.py | 1 - 2 files changed, 20 insertions(+), 7 deletions(-) diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 3edce0dcb92..061f81dda12 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -671,7 +671,11 @@ def get_sampler_args(self, image): self.transfer_function, self.sub_samples, ) - return args, {"lens_type": "plane-parallel"} + kwargs = { + "lens_type": "plane-parallel", + "volume_method": "KDTree", + } + return args, kwargs def get_sampler(self, args, kwargs): if self.use_light: @@ -1362,7 +1366,11 @@ def get_sampler_args(self, image): self.transfer_function, self.sub_samples, ) - return args, {"lens_type": "perspective"} + kwargs = { + "lens_type": "perspective", + "volume_method": "KDTree", + } + return args, kwargs def _render(self, double_check, num_threads, image, sampler): ncells = sum(b.source_mask.size for b in self.volume.bricks) @@ -1562,7 +1570,8 @@ def get_sampler_args(self, image): if self._needs_tf: args += (self.transfer_function,) args += (self.sub_samples,) - return args, {} + + return args, {"volume_method": "KDTree"} def _render(self, double_check, num_threads, image, sampler): pbar = get_pbar( @@ -1781,7 +1790,7 @@ def get_sampler_args(self, image): self.transfer_function, self.sub_samples, ) - return args, {} + return args, {"volume_method": "KDTree"} def finalize_image(self, image): image.shape = self.resolution, self.resolution, 4 @@ -2176,7 +2185,8 @@ def get_sampler_args(self, image): np.array(self.width, dtype="float64"), self.sub_samples, ) - return args, {"lens_type": "plane-parallel"} + kwargs = {"lens_type": "plane-parallel", "volume_method": "KDTree"} + return args, kwargs def finalize_image(self, image): ds = self.ds @@ -2419,7 +2429,11 @@ def get_sampler_args(self, image): self.transfer_function, self.sub_samples, ) - return args, {"lens_type": "stereo-spherical"} + kwargs = { + "lens_type": "stereo-spherical", + "volume_method": "KDTree", + } + return args, kwargs def snapshot( self, diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 7d6f92bab03..169c266ab1d 100644 --- a/yt/visualization/volume_rendering/utils.py +++ 
b/yt/visualization/volume_rendering/utils.py @@ -80,7 +80,6 @@ def new_volume_render_sampler(camera, render_source): ) else: kwargs["zbuffer"] = np.ones(params["image"].shape[:2], "float64") - sampler = VolumeRenderSampler(*args, **kwargs) return sampler From f0a280f319c4369cb25f6a7550b1a5c6dfa10590 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 28 Jul 2020 00:33:43 +0200 Subject: [PATCH 446/653] Last pass at fixing render_source errors --- yt/utilities/lib/image_samplers.pyx | 11 ++++++++--- yt/visualization/volume_rendering/render_source.py | 6 ++---- yt/visualization/volume_rendering/utils.py | 5 ++++- 3 files changed, 14 insertions(+), 8 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 2e26c349b56..0a95f192aaa 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -69,10 +69,12 @@ cdef class ImageSampler: np.ndarray[np.float64_t, ndim=1] x_vec, np.ndarray[np.float64_t, ndim=1] y_vec, np.ndarray[np.float64_t, ndim=1] width, - *args, **kwargs): + *args, + str volume_method=None, + **kwargs): cdef int i - self.volume_method = kwargs.pop('volume_method', None) + self.volume_method = volume_method camera_data = kwargs.pop("camera_data", None) if camera_data is not None: self.camera_data = camera_data @@ -137,7 +139,10 @@ cdef class ImageSampler: elif self.volume_method == 'Octree': return self.cast_through_octree(pg, **kwa) else: - raise NotImplementedError + raise NotImplementedError( + 'Volume rendering has not been implemented for method: "%s"' % + self.volume_method + ) @cython.boundscheck(False) @cython.wraparound(False) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index ec63bdc4d68..65562c94f5c 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -121,10 +121,8 @@ def set_zbuffer(self, zbuffer): def create_volume_source(data_source, field): - if isinstance(data_source, Dataset): - ds = data_source - else: - ds = data_source.ds + data_source = data_source_or_all(data_source) + ds = data_source.ds index_class = ds.index.__class__ if issubclass(index_class, GridIndex): return KDTreeVolumeSource(data_source, field) diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 169c266ab1d..3e75b531dd1 100644 --- a/yt/visualization/volume_rendering/utils.py +++ b/yt/visualization/volume_rendering/utils.py @@ -123,7 +123,10 @@ def new_projection_sampler(camera, render_source): params["width"], params["num_samples"], ) - kwargs = {"lens_type": params["lens_type"]} + kwargs = { + "lens_type": params["lens_type"], + "volume_method": render_source.volume_method + } if render_source.zbuffer is not None: kwargs["zbuffer"] = render_source.zbuffer.z else: From 6bd49ee602647c6fb1191b4a6d311537c72aa5a3 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 28 Jul 2020 00:54:54 +0200 Subject: [PATCH 447/653] black pass --- yt/visualization/volume_rendering/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 3e75b531dd1..92e62207b56 100644 --- a/yt/visualization/volume_rendering/utils.py +++ b/yt/visualization/volume_rendering/utils.py @@ -125,7 +125,7 @@ def new_projection_sampler(camera, render_source): ) kwargs = { "lens_type": params["lens_type"], - "volume_method": 
render_source.volume_method + "volume_method": render_source.volume_method, } if render_source.zbuffer is not None: kwargs["zbuffer"] = render_source.zbuffer.z From 34807603d39736ab7e23d7417d2725576f8c7834 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 28 Jul 2020 00:55:49 +0200 Subject: [PATCH 448/653] (snow)flake --- yt/visualization/volume_rendering/render_source.py | 1 - 1 file changed, 1 deletion(-) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 65562c94f5c..dfc114e9ab6 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -5,7 +5,6 @@ from yt.config import ytcfg from yt.data_objects.image_array import ImageArray -from yt.data_objects.static_output import Dataset from yt.funcs import ensure_numpy_array, iterable, mylog from yt.geometry.grid_geometry_handler import GridIndex from yt.geometry.oct_geometry_handler import OctreeIndex From c768d9d268e8ab2ed21956c27d1efd5220e627cc Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Thu, 30 Jul 2020 14:19:08 +0200 Subject: [PATCH 449/653] Start fixing what I broke ... --- yt/frontends/stream/data_structures.py | 84 ++++++++++++++++++- yt/utilities/lib/pyoctree_raytracing.py | 12 ++- .../volume_rendering/off_axis_projection.py | 27 +----- .../volume_rendering/render_source.py | 10 --- 4 files changed, 94 insertions(+), 39 deletions(-) diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index ac71ef66a64..1a58c2e7cfc 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -7,6 +7,7 @@ from numbers import Number as numeric_type import numpy as np +from unyt.dimensions import number_density from yt.data_objects.field_data import YTFieldData from yt.data_objects.grid_patch import AMRGridPatch @@ -1689,7 +1690,8 @@ class StreamOctreeSubset(OctreeSubset): domain_id = 1 _domain_offset = 1 - def __init__(self, base_region, ds, oct_handler, over_refine_factor=1): + def __init__(self, base_region, ds, oct_handler, over_refine_factor=1, num_ghost_zones=0): + self._over_refine_factor = over_refine_factor self._num_zones = 1 << (over_refine_factor) self.field_data = YTFieldData() self.field_parameters = {} @@ -1702,7 +1704,33 @@ def __init__(self, base_region, ds, oct_handler, over_refine_factor=1): self.base_region = base_region self.base_selector = base_region.selector - def fill(self, content, dest, selector, offset): + self._num_ghost_zones = num_ghost_zones + + if num_ghost_zones > 0: + if not all(ds.periodicity): + mylog.warn("Ghost zones will wrongly assume the domain to be periodic.") + base_grid = StreamOctreeSubset(base_region, ds, oct_handler, over_refine_factor) + self._base_grid = base_grid + + def retrieve_ghost_zones(self, ngz, fields, smoothed=False): + try: + new_subset = self._subset_with_gz + mylog.debug( + "Reusing previous subset with ghost zone." + ) + except AttributeError: + new_subset = StreamOctreeSubset( + self.base_region, + self.ds, + self.oct_handler, + self._over_refine_factor, + num_ghost_zones=ngz + ) + self._subset_with_gz = new_subset + + return new_subset + + def _fill_no_ghostzones(self, content, dest, selector, offset): # Here we get a copy of the file, which we skip through and read the # bits we want. 
oct_handler = self.oct_handler @@ -1718,6 +1746,39 @@ def fill(self, content, dest, selector, offset): ) return count + def _fill_with_ghostzones(self, content, dest, selector, offset): + oct_handler = self.oct_handler + ndim = self.ds.dimensionality + cell_count = ( + selector.count_octs(self.oct_handler, self.domain_id) * self.nz ** ndim + ) + + gz_cache = getattr(self, "_ghost_zone_cache", None) + if gz_cache: + levels, cell_inds, file_inds, domains = gz_cache + else: + gz_cache = ( + levels, + cell_inds, + file_inds, + domains, + ) = oct_handler.file_index_octs_with_ghost_zones( + selector, self.domain_id, cell_count + ) + self._ghost_zone_cache = gz_cache + levels[:] = 0 + dest.update((field, np.empty(cell_count, dtype="float64")) for field in content) + # Make references ... + count = oct_handler.fill_level( + 0, levels, cell_inds, file_inds, dest, content, offset + ) + + def fill(self, content, dest, selector, offset): + if self._num_ghost_zones == 0: + return self._fill_no_ghostzones(content, dest, selector, offset) + else: + return self._fill_with_ghostzones(content, dest, selector, offset) + class StreamOctreeHandler(OctreeIndex): def __init__(self, ds, dataset_type=None): @@ -1795,6 +1856,25 @@ class StreamOctreeDataset(StreamDataset): _field_info_class = StreamFieldInfo _dataset_type = "stream_octree" + levelmax = None + + def __init__( + self, + stream_handler, + storage_filename=None, + geometry="cartesian", + unit_system="cgs", + ): + super(StreamOctreeDataset, self).__init__( + stream_handler, + storage_filename, + geometry, + unit_system + ) + # Set up levelmax + self.max_level = stream_handler.levels.max() + self.min_level = stream_handler.levels.min() + def load_octree( octree_mask, diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/pyoctree_raytracing.py index d8fc9e4f64f..218c7e4ca6d 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/pyoctree_raytracing.py @@ -2,6 +2,7 @@ import numpy as np +from yt.funcs import mylog from yt.utilities.lib.cyoctree_raytracing import CythonOctreeRayTracing @@ -17,17 +18,22 @@ class OctreeRayTracing(object): def __init__(self, data_source): self.data_source = data_source + ds = data_source.ds LE = np.array([0, 0, 0], dtype=np.float64) RE = np.array([1, 1, 1], dtype=np.float64) - depth = data_source.ds.parameters["levelmax"] + lvl_min = ds.min_level + 1 + # This is the max refinement so that the smallest cells have size + # 1/2**depth + depth = lvl_min + ds.max_level + 1 self.octree = CythonOctreeRayTracing(LE, RE, depth) ds = data_source.ds xyz = np.stack([data_source[key].to("unitary").value for key in "xyz"], axis=-1) - lvl = data_source["grid_level"].astype(int).value + ds.parameters["levelmin"] + lvl = data_source["grid_level"].astype(int).value + lvl_min - ipos = np.floor(xyz * (1 << (ds.parameters["levelmax"]))).astype(int) + ipos = np.floor(xyz * (1 << depth)).astype(int) + mylog.debug("Adding cells to volume") self.octree.add_nodes( ipos.astype(np.int32), lvl.astype(np.int32), diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 521aa83aee5..331b56fb39b 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -2,6 +2,8 @@ from yt.data_objects.api import ImageArray from yt.funcs import iterable, mylog +from yt.geometry.grid_geometry_handler import GridIndex +from yt.geometry.oct_geometry_handler import OctreeIndex from 
yt.units.unit_object import Unit from yt.utilities.lib.partitioned_grid import PartitionedGrid from yt.utilities.lib.pixelization_routines import ( @@ -365,30 +367,7 @@ def temp_weightfield(a, b): if vol.weight_field is not None: fields.append(vol.weight_field) - mylog.debug("Casting rays") - - for (grid, mask) in data_source.blocks: - data = [] - for f in fields: - # strip units before multiplying by mask for speed - grid_data = grid[f] - units = grid_data.units - data.append(data_source.ds.arr(grid_data.d * mask, units, dtype="float64")) - pg = PartitionedGrid( - grid.id, - data, - mask.astype("uint8"), - grid.LeftEdge, - grid.RightEdge, - grid.ActiveDimensions.astype("int64"), - ) - grid.clear_data() - vol.sampler(pg, num_threads=num_threads) - - image = vol.finalize_image(camera, vol.sampler.aimage) - image = ImageArray( - image, funits, registry=data_source.ds.unit_registry, info=image.info - ) + image = vol.render(camera) if weight is not None: data_source.ds.field_info.pop(("index", "temp_weightfield")) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index dfc114e9ab6..264e2e759db 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -573,16 +573,6 @@ def _get_volume(self): if self._volume is None: mylog.info("Creating volume") volume = OctreeRayTracing(self.data_source) - - data = self.data_source - ds = data.ds - - xyz = np.stack([data[key].to("unitary").value for key in "xyz"], axis=-1) - lvl = data["grid_level"].astype(np.int32).value + ds.parameters["levelmin"] - ipos = np.floor(xyz * (1 << (ds.parameters["levelmax"]))).astype(np.int32) - - mylog.debug("Adding cells to volume") - volume.octree.add_nodes(ipos, lvl, np.arange(len(ipos), dtype=np.int32)) self._volume = volume return self._volume From 6a31ae5883bf95bc405b538d3b1e3c9cbf790432 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 11:15:37 +0200 Subject: [PATCH 450/653] Passing volume method as kwa --- yt/utilities/lib/image_samplers.pyx | 22 +++++++++++++------ .../volume_rendering/old_camera.py | 17 ++++++++------ .../volume_rendering/render_source.py | 4 ++++ yt/visualization/volume_rendering/utils.py | 6 +++-- 4 files changed, 33 insertions(+), 16 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 0a95f192aaa..7433b130235 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -69,8 +69,8 @@ cdef class ImageSampler: np.ndarray[np.float64_t, ndim=1] x_vec, np.ndarray[np.float64_t, ndim=1] y_vec, np.ndarray[np.float64_t, ndim=1] width, + str volume_method, *args, - str volume_method=None, **kwargs): cdef int i @@ -357,9 +357,12 @@ cdef class InterpolatedProjectionSampler(ImageSampler): np.ndarray[np.float64_t, ndim=1] x_vec, np.ndarray[np.float64_t, ndim=1] y_vec, np.ndarray[np.float64_t, ndim=1] width, - n_samples = 10, **kwargs): + str volume_method, + n_samples = 10, + **kwargs + ): ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image, - x_vec, y_vec, width, **kwargs) + x_vec, y_vec, width, volume_method, **kwargs) # Now we handle tf_obj self.vra = \ malloc(sizeof(VolumeRenderAccumulator)) @@ -413,8 +416,11 @@ cdef class VolumeRenderSampler(ImageSampler): np.ndarray[np.float64_t, ndim=1] x_vec, np.ndarray[np.float64_t, ndim=1] y_vec, np.ndarray[np.float64_t, ndim=1] width, - tf_obj, n_samples = 10, - **kwargs): + str volume_method, + tf_obj, + n_samples = 
10, + **kwargs + ): ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image, x_vec, y_vec, width, **kwargs) cdef int i @@ -501,12 +507,14 @@ cdef class LightSourceRenderSampler(ImageSampler): np.ndarray[np.float64_t, ndim=1] x_vec, np.ndarray[np.float64_t, ndim=1] y_vec, np.ndarray[np.float64_t, ndim=1] width, - tf_obj, n_samples = 10, + str volume_method, + tf_obj, + n_samples = 10, light_dir=[1.,1.,1.], light_rgba=[1.,1.,1.,1.], **kwargs): ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image, - x_vec, y_vec, width, **kwargs) + x_vec, y_vec, width, volume_method, **kwargs) cdef int i cdef np.ndarray[np.float64_t, ndim=1] temp # Now we handle tf_obj diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 061f81dda12..9b903ac19af 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -668,12 +668,12 @@ def get_sampler_args(self, image): self.orienter.unit_vectors[0], self.orienter.unit_vectors[1], np.array(self.width, dtype="float64"), + "KDTree", self.transfer_function, self.sub_samples, ) kwargs = { "lens_type": "plane-parallel", - "volume_method": "KDTree", } return args, kwargs @@ -1363,12 +1363,12 @@ def get_sampler_args(self, image): dummy, dummy, np.zeros(3, dtype="float64"), + "KDTree", self.transfer_function, self.sub_samples, ) kwargs = { "lens_type": "perspective", - "volume_method": "KDTree", } return args, kwargs @@ -1566,12 +1566,13 @@ def get_sampler_args(self, image): uv, uv, np.zeros(3, dtype="float64"), + "KDTree" ) if self._needs_tf: args += (self.transfer_function,) args += (self.sub_samples,) - return args, {"volume_method": "KDTree"} + return args, {} def _render(self, double_check, num_threads, image, sampler): pbar = get_pbar( @@ -1787,10 +1788,11 @@ def get_sampler_args(self, image): uv, uv, np.zeros(3, dtype="float64"), + "KDTree", self.transfer_function, self.sub_samples, ) - return args, {"volume_method": "KDTree"} + return args, {} def finalize_image(self, image): image.shape = self.resolution, self.resolution, 4 @@ -2183,9 +2185,10 @@ def get_sampler_args(self, image): self.orienter.unit_vectors[0], self.orienter.unit_vectors[1], np.array(self.width, dtype="float64"), + "KDTree", self.sub_samples, ) - kwargs = {"lens_type": "plane-parallel", "volume_method": "KDTree"} + kwargs = {"lens_type": "plane-parallel"} return args, kwargs def finalize_image(self, image): @@ -2426,12 +2429,12 @@ def get_sampler_args(self, image): dummy, dummy, np.zeros(3, dtype="float64"), + "KDTree", self.transfer_function, self.sub_samples, ) kwargs = { - "lens_type": "stereo-spherical", - "volume_method": "KDTree", + "lens_type": "stereo-spherical" } return args, kwargs diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 264e2e759db..8a52c395238 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -92,14 +92,18 @@ class RenderSource(ParallelAnalysisInterface): """ + volume_method = None + def __init__(self): super(RenderSource, self).__init__() self.opaque = False self.zbuffer = None + @abc.abstractmethod def render(self, camera, zbuffer=None): pass + @abc.abstractmethod def _validate(self): pass diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 92e62207b56..7d53e49922b 100644 --- a/yt/visualization/volume_rendering/utils.py +++ 
b/yt/visualization/volume_rendering/utils.py @@ -40,6 +40,7 @@ def new_mesh_sampler(camera, render_source, engine): params["x_vec"], params["y_vec"], params["width"], + render_source.volume_method, ) kwargs = {"lens_type": params["lens_type"]} if engine == "embree": @@ -63,12 +64,12 @@ def new_volume_render_sampler(camera, render_source): params["x_vec"], params["y_vec"], params["width"], + render_source.volume_method, params["transfer_function"], params["num_samples"], ) kwargs = { "lens_type": params["lens_type"], - "volume_method": render_source.volume_method, } if "camera_data" in params: kwargs["camera_data"] = params["camera_data"] @@ -97,6 +98,7 @@ def new_interpolated_projection_sampler(camera, render_source): params["x_vec"], params["y_vec"], params["width"], + render_source.volume_method, params["num_samples"], ) kwargs = {"lens_type": params["lens_type"]} @@ -121,11 +123,11 @@ def new_projection_sampler(camera, render_source): params["x_vec"], params["y_vec"], params["width"], + render_source.volume_method, params["num_samples"], ) kwargs = { "lens_type": params["lens_type"], - "volume_method": render_source.volume_method, } if render_source.zbuffer is not None: kwargs["zbuffer"] = render_source.zbuffer.z From 20faa4fda3ef06f7aa1fbfe7845f7bd4c4d740b2 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 11:23:23 +0200 Subject: [PATCH 451/653] Do not forget to pass on the volume_method --- yt/utilities/lib/image_samplers.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 7433b130235..2559e359f62 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -422,7 +422,7 @@ cdef class VolumeRenderSampler(ImageSampler): **kwargs ): ImageSampler.__init__(self, vp_pos, vp_dir, center, bounds, image, - x_vec, y_vec, width, **kwargs) + x_vec, y_vec, width, volume_method, **kwargs) cdef int i cdef np.ndarray[np.float64_t, ndim=1] temp # Now we handle tf_obj From 7315cc4240509633487e27200f13ee3cd08d16c4 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 11:25:14 +0200 Subject: [PATCH 452/653] Black + isort pass --- yt/frontends/stream/data_structures.py | 19 +++++++++---------- .../volume_rendering/old_camera.py | 6 ++---- 2 files changed, 11 insertions(+), 14 deletions(-) diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 1a58c2e7cfc..96facdab846 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -1690,7 +1690,9 @@ class StreamOctreeSubset(OctreeSubset): domain_id = 1 _domain_offset = 1 - def __init__(self, base_region, ds, oct_handler, over_refine_factor=1, num_ghost_zones=0): + def __init__( + self, base_region, ds, oct_handler, over_refine_factor=1, num_ghost_zones=0 + ): self._over_refine_factor = over_refine_factor self._num_zones = 1 << (over_refine_factor) self.field_data = YTFieldData() @@ -1709,22 +1711,22 @@ def __init__(self, base_region, ds, oct_handler, over_refine_factor=1, num_ghost if num_ghost_zones > 0: if not all(ds.periodicity): mylog.warn("Ghost zones will wrongly assume the domain to be periodic.") - base_grid = StreamOctreeSubset(base_region, ds, oct_handler, over_refine_factor) + base_grid = StreamOctreeSubset( + base_region, ds, oct_handler, over_refine_factor + ) self._base_grid = base_grid def retrieve_ghost_zones(self, ngz, fields, smoothed=False): try: new_subset = self._subset_with_gz - mylog.debug( - 
"Reusing previous subset with ghost zone." - ) + mylog.debug("Reusing previous subset with ghost zone.") except AttributeError: new_subset = StreamOctreeSubset( self.base_region, self.ds, self.oct_handler, self._over_refine_factor, - num_ghost_zones=ngz + num_ghost_zones=ngz, ) self._subset_with_gz = new_subset @@ -1866,10 +1868,7 @@ def __init__( unit_system="cgs", ): super(StreamOctreeDataset, self).__init__( - stream_handler, - storage_filename, - geometry, - unit_system + stream_handler, storage_filename, geometry, unit_system ) # Set up levelmax self.max_level = stream_handler.levels.max() diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 9b903ac19af..f5cd1fd41fb 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1566,7 +1566,7 @@ def get_sampler_args(self, image): uv, uv, np.zeros(3, dtype="float64"), - "KDTree" + "KDTree", ) if self._needs_tf: args += (self.transfer_function,) @@ -2433,9 +2433,7 @@ def get_sampler_args(self, image): self.transfer_function, self.sub_samples, ) - kwargs = { - "lens_type": "stereo-spherical" - } + kwargs = {"lens_type": "stereo-spherical"} return args, kwargs def snapshot( From 1d8b665c24f01821069b8b9ffbc2c4398a15bc7d Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 11:27:13 +0200 Subject: [PATCH 453/653] Flaking --- yt/frontends/stream/data_structures.py | 5 +---- yt/visualization/volume_rendering/off_axis_projection.py | 3 --- 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 96facdab846..6867532efb4 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -7,7 +7,6 @@ from numbers import Number as numeric_type import numpy as np -from unyt.dimensions import number_density from yt.data_objects.field_data import YTFieldData from yt.data_objects.grid_patch import AMRGridPatch @@ -1771,9 +1770,7 @@ def _fill_with_ghostzones(self, content, dest, selector, offset): levels[:] = 0 dest.update((field, np.empty(cell_count, dtype="float64")) for field in content) # Make references ... 
- count = oct_handler.fill_level( - 0, levels, cell_inds, file_inds, dest, content, offset - ) + oct_handler.fill_level(0, levels, cell_inds, file_inds, dest, content, offset) def fill(self, content, dest, selector, offset): if self._num_ghost_zones == 0: diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 331b56fb39b..3a26d42a9d6 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -2,10 +2,7 @@ from yt.data_objects.api import ImageArray from yt.funcs import iterable, mylog -from yt.geometry.grid_geometry_handler import GridIndex -from yt.geometry.oct_geometry_handler import OctreeIndex from yt.units.unit_object import Unit -from yt.utilities.lib.partitioned_grid import PartitionedGrid from yt.utilities.lib.pixelization_routines import ( normalization_2d_utility, off_axis_projection_SPH, From cde25a88935c5a0a7b6fa17602bb27b7f2e8a864 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 5 Aug 2020 14:42:05 +0200 Subject: [PATCH 454/653] Fix off axis projection --- yt/visualization/volume_rendering/off_axis_projection.py | 8 ++++++-- yt/visualization/volume_rendering/render_source.py | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 3a26d42a9d6..17eefe6a004 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -106,7 +106,7 @@ def off_axis_projection( >>> write_image(np.log10(image), "offaxis.png") """ - if method not in ["integrate", "sum"]: + if method not in ("integrate", "sum"): raise NotImplementedError( "Only 'integrate' or 'sum' methods are valid for off-axis-projections" ) @@ -303,6 +303,7 @@ def off_axis_projection( funits = data_source.ds._get_field_info(item).units vol = create_volume_source(data_source, item) + vol.num_threads = num_threads if weight is None: vol.set_field(item) else: @@ -364,8 +365,11 @@ def temp_weightfield(a, b): if vol.weight_field is not None: fields.append(vol.weight_field) + vol._log_field = False image = vol.render(camera) - + image = ImageArray( + image, funits, registry=data_source.ds.unit_registry, info=image.info + ) if weight is not None: data_source.ds.field_info.pop(("index", "temp_weightfield")) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index 8a52c395238..c33e8f85af7 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -54,7 +54,7 @@ def invalidate_volume(f): def wrapper(*args, **kwargs): ret = f(*args, **kwargs) obj = args[0] - if isinstance(obj._transfer_function, ProjectionTransferFunction): + if isinstance(obj.transfer_function, ProjectionTransferFunction): obj.sampler_type = "projection" obj._log_field = False obj._use_ghost_zones = False From 4f6224301b964a0bac89fdf06843a05f386691e7 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 14:52:45 +0100 Subject: [PATCH 455/653] Update yt/data_objects/static_output.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/data_objects/static_output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/static_output.py 
b/yt/data_objects/static_output.py index c29eccced56..d7d42cd1fe9 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1119,7 +1119,7 @@ def set_units(self): if getattr(self, "current_redshift", None): # Comoving lengths for my_unit in ["m", "pc", "AU", "au"]: - new_unit = "%scm" % my_unit + new_unit = f"{my_unit}cm" my_u = Unit(my_unit, registry=self.unit_registry) self.unit_registry.add( new_unit, From 0d14660d3c7fd450ce671956b54646c63eb13a13 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 15:57:20 +0100 Subject: [PATCH 456/653] Remove unused function. --- yt/frontends/halo_catalog/data_structures.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 133b468e012..7631b231958 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -215,9 +215,6 @@ def _calculate_particle_index_starts(self): ] ) - def _create_halo_id_table(self): - pass - def _detect_output_fields(self): field_list = [] scalar_field_list = [] @@ -314,7 +311,6 @@ def _setup_data_io(self): setattr(self, attr, getattr(self.real_ds.index, attr)) self._calculate_particle_index_starts() - self._create_halo_id_table() class HaloDataset(ParticleDataset): From 2ebac45331670d74ac1345b53c6fb1a421a7e473 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 16:12:12 +0100 Subject: [PATCH 457/653] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/data_objects/static_output.py | 2 +- yt/frontends/gadget_fof/data_structures.py | 3 ++- yt/frontends/halo_catalog/io.py | 4 ++-- yt/frontends/halo_catalog/tests/test_outputs.py | 5 ++--- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index d7d42cd1fe9..fe4ee672461 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1125,7 +1125,7 @@ def set_units(self): new_unit, my_u.base_value / (1 + self.current_redshift), dimensions.length, - "\\rm{%s}/(1+z)" % my_unit, + f"\\rm\{{my_unit}\}/(1+z)", prefixable=True, ) self.unit_registry.modify("a", 1 / (1 + self.current_redshift)) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index dbc90b51b3a..bfa9a734ac5 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -498,7 +498,8 @@ def _set_code_unit_attributes(self): setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) @classmethod - def _is_valid(self, *args, **kwargs): + def _is_valid(cls, *args, **kwargs): + # This class is not meant to be instanciated by yt.load() return False diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index 7703fb33454..d8365918a27 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -148,7 +148,7 @@ def _read_member_fields(self, dobj, member_fields): if pcount == 0: continue field_end = field_start + end_index - start_index - with h5py.File(data_file.filename, "r") as f: + with h5py.File(data_file.filename, mode="r") as f: for ptype, field_list in sorted(member_fields.items()): for field in field_list: field_data = all_data[(ptype, field)] @@ -163,7 +163,7 @@ def _read_scalar_fields(self, dobj, scalar_fields): all_data = {} if not scalar_fields: return 
all_data - with h5py.File(dobj.scalar_data_file.filename, "r") as f: + with h5py.File(dobj.scalar_data_file.filename, mode="r") as f: for ptype, field_list in sorted(scalar_fields.items()): for field in field_list: data = np.array([f[field][dobj.scalar_index]]).astype("float64") diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index 7b1214d99e1..b099d7c0298 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -108,14 +108,13 @@ def test_halo_quantities(): hid = int(ad["halos", "particle_identifier"][i]) halo = ds.halo("halos", hid) for field in ["mass", "position", "velocity"]: - v1 = ad["halos", "particle_%s" % field][i] + v1 = ad["halos", f"particle_{field][i]}" v2 = getattr(halo, field) assert_allclose_units( - v1, v2, rtol=1e-15, err_msg="Halo %d %s field mismatch." % (hid, field) + v1, v2, rtol=1e-15, err_msg="Halo {hid} {field} field mismatch." ) -t46 = "tiny_fof_halos/DD0046/DD0046.0.h5" @requires_file(t46) From dbde3826423e4dd1f08fcae202a17b4e6083af66 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 16:18:00 +0100 Subject: [PATCH 458/653] Undo f-string conversion. --- yt/data_objects/static_output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index fe4ee672461..cf634898c7d 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1125,7 +1125,7 @@ def set_units(self): new_unit, my_u.base_value / (1 + self.current_redshift), dimensions.length, - f"\\rm\{{my_unit}\}/(1+z)", + "\\rm\{%s\}/(1+z)" % my_unit, prefixable=True, ) self.unit_registry.modify("a", 1 / (1 + self.current_redshift)) From 28d4682bd014ae6d23c7a76c5c3ca49695ca9291 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 16:32:04 +0100 Subject: [PATCH 459/653] Fix flake8 errors. --- yt/frontends/gadget_fof/data_structures.py | 2 +- yt/frontends/halo_catalog/tests/test_outputs.py | 3 --- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index bfa9a734ac5..72243c32766 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -499,7 +499,7 @@ def _set_code_unit_attributes(self): @classmethod def _is_valid(cls, *args, **kwargs): - # This class is not meant to be instanciated by yt.load() + # This class is not meant to be instanciated by yt.load() return False diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index b099d7c0298..e59a25735ea 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -114,9 +114,6 @@ def test_halo_quantities(): v1, v2, rtol=1e-15, err_msg="Halo {hid} {field} field mismatch." ) - - - @requires_file(t46) @requires_module("h5py") def test_halo_particles(): From 490ad69e2ac8ee93e62b1dd10c498f2d043c87be Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 17:24:33 +0100 Subject: [PATCH 460/653] Fix fstring and change to assert_equal. 
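For context, the loop this fixes walks the catalog and checks each halo container against the corresponding scalar fields. The access pattern under test looks roughly like this (the dataset is the same tiny_fof_halos sample used by these tests; picking index 0 is purely illustrative):

    >>> import yt
    >>> ds = yt.load("tiny_fof_halos/DD0046/DD0046.0.h5")
    >>> ad = ds.all_data()
    >>> hid = int(ad["halos", "particle_identifier"][0])
    >>> halo = ds.halo("halos", hid)
    >>> halo.mass, ad["halos", "particle_mass"][0]

assert_equal demands an exact match rather than a tolerance, which is reasonable here since both values are read from the same catalog file.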
--- yt/frontends/halo_catalog/tests/test_outputs.py | 7 ++----- 1 file changed, 2 insertions(+), 5 deletions(-) diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index e59a25735ea..390ab782818 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -5,7 +5,6 @@ from yt.frontends.ytdata.utilities import save_as_dataset from yt.testing import ( TempDirTest, - assert_allclose_units, assert_array_equal, assert_equal, requires_file, @@ -108,11 +107,9 @@ def test_halo_quantities(): hid = int(ad["halos", "particle_identifier"][i]) halo = ds.halo("halos", hid) for field in ["mass", "position", "velocity"]: - v1 = ad["halos", f"particle_{field][i]}" + v1 = ad["halos", f"particle_{field}"][i] v2 = getattr(halo, field) - assert_allclose_units( - v1, v2, rtol=1e-15, err_msg="Halo {hid} {field} field mismatch." - ) + assert_equal(v1, v2, err_msg=f"Halo {hid} {field} field mismatch.") @requires_file(t46) @requires_module("h5py") From 07711a783a8a6ec5435e5d30b55fe5362a09c59e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 15:25:58 +0200 Subject: [PATCH 461/653] Improve some error messages, replace a non-blocking RuntimeError with a runtime warning --- yt/data_objects/construction_data_containers.py | 14 +++++++++++++- yt/fields/field_exceptions.py | 3 ++- yt/geometry/selection_routines.pyx | 8 +++++--- 3 files changed, 20 insertions(+), 5 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 9818b3ccdbf..c5798015f9a 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -1,6 +1,7 @@ import fileinput import io import os +import warnings import zipfile from functools import wraps from re import finditer @@ -1348,6 +1349,8 @@ def _fill_fields(self, fields): if not iterable(self.ds.refine_by): refine_by = [refine_by, refine_by, refine_by] refine_by = np.array(refine_by, dtype="i8") + + runtime_errors_count = 0 for level in range(self.level + 1): if level < min_level: self._update_level_state(ls) @@ -1373,8 +1376,17 @@ def _fill_fields(self, fields): refine_by, ) if level == 0 and tot != 0: - raise RuntimeError + runtime_errors_count += 1 self._update_level_state(ls) + if runtime_errors_count: + warnings.warn( + "Something went wrong during field computation. " + "This is likely due to missing ghost-zones support " + "in class %s", + self.ds.__class__, + category=RuntimeWarning, + ) + mylog.debug(f"Caught {runtime_errors_count} runtime errors.") for name, v in zip(fields, ls.fields): if self.level > 0: v = v[1:-1, 1:-1, 1:-1] diff --git a/yt/fields/field_exceptions.py b/yt/fields/field_exceptions.py index d37e8653284..3e882b25fe7 100644 --- a/yt/fields/field_exceptions.py +++ b/yt/fields/field_exceptions.py @@ -8,7 +8,8 @@ def __init__(self, ghost_zones=0, fields=None): self.fields = fields def __str__(self): - return f"({self.ghost_zones}, {self.fields})" + s = "s" if self.ghost_zones != 1 else "" + return f"fields {self.fields} require {self.ghost_zones} ghost zone{s}." 
class NeedsOriginalGrid(NeedsGridType): diff --git a/yt/geometry/selection_routines.pyx b/yt/geometry/selection_routines.pyx index 8821e454d66..f232c121965 100644 --- a/yt/geometry/selection_routines.pyx +++ b/yt/geometry/selection_routines.pyx @@ -999,14 +999,16 @@ cdef class RegionSelector(SelectorObject): else: if LE[i] < DLE[i] or RE[i] > DRE[i]: raise RuntimeError( - "Error: yt attempted to read outside the boundaries of " + "yt attempted to read outside the boundaries of " "a non-periodic domain along dimension %s.\n" "Region left edge = %s, Region right edge = %s\n" "Dataset left edge = %s, Dataset right edge = %s\n\n" "This commonly happens when trying to compute ghost cells " "up to the domain boundary. Two possible solutions are to " - "load a smaller region that does not border the edge or " - "override the periodicity for this dataset." % \ + "select a smaller region that does not border domain edge " + "(see https://yt-project.org/docs/analyzing/objects.html?highlight=region)\n" + "or override the periodicity with e.g\n" + "ds.periodicity = 3*[True]" % \ (i, dobj.left_edge[i], dobj.right_edge[i], dobj.ds.domain_left_edge[i], dobj.ds.domain_right_edge[i]) ) From 5b59177d25402e732691ffbc31054bb0e8f7ab1b Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 17:26:17 +0100 Subject: [PATCH 462/653] Convert to fstring. --- yt/frontends/halo_catalog/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 7631b231958..3d4748f186e 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -474,7 +474,7 @@ def _set_io_data(self): ) def __repr__(self): - return "%s_%s_%09d" % (self.ds, self.ptype, self.particle_identifier) + return f"{self.ds}_{self.ptype}_{self.particle_identifier:09d}" class YTHaloCatalogHaloContainer(HaloContainer): From a92da0934cabc0e802fc34cd9d40dcc809ee5db0 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 19:01:19 +0100 Subject: [PATCH 463/653] Run black. --- yt/frontends/halo_catalog/tests/test_outputs.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index c22d668b380..e09d1d54742 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -109,6 +109,7 @@ def test_halo_quantities(): v2 = getattr(halo, field) assert_equal(v1, v2, err_msg=f"Halo {hid} {field} field mismatch.") + @requires_file(t46) @requires_module("h5py") def test_halo_particles(): From 85d0b65a670fe719404875e42a9e59dec7c7ae5c Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 19:10:17 +0100 Subject: [PATCH 464/653] Flynt some. 
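All of the conversions below are the plain value-substitution case, where flynt's rewrite is a drop-in equivalent of the old printf-style formatting; for example (values invented for illustration):

    unit = "length"
    old = "%s_unit" % unit    # removed printf-style form
    new = f"{unit}_unit"      # f-string form produced by flynt
    assert old == new == "length_unit"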
--- yt/frontends/halo_catalog/data_structures.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index d6ef7f9dcd5..cb592bd1220 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -357,12 +357,12 @@ def _parse_parameter_file(self): def set_code_units(self): for unit in ["length", "time", "mass", "velocity", "magnetic", "temperature"]: - my_unit = "%s_unit" % unit + my_unit = f"{unit}_unit" setattr(self, my_unit, getattr(self.real_ds, my_unit, None)) self.unit_registry = self.real_ds.unit_registry def __repr__(self): - return "%s" % self.real_ds + return f"{self.real_ds}" def _setup_classes(self): self.objects = [] @@ -401,8 +401,7 @@ class HaloContainer(YTSelectionContainer): def __init__(self, ptype, particle_identifier, ds=None): if ptype not in ds.particle_types_raw: raise RuntimeError( - 'Possible halo types are %s, supplied "%s".' - % (ds.particle_types_raw, ptype) + f'Possible halo types are {ds.particle_types_raw}, supplied "{ptype}".' ) self.ptype = ptype From 90360d6493b59f8ccabb53bd2b4beb23e5b1ea6f Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Wed, 12 Aug 2020 21:03:49 +0100 Subject: [PATCH 465/653] Check dataset type explicitly in tests. --- yt/frontends/halo_catalog/tests/test_outputs.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index e09d1d54742..c371a5a0491 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -51,7 +51,7 @@ def test_halo_catalog(self): fn = fake_halo_catalog(data) ds = yt_load(fn) - assert isinstance(ds, YTHaloCatalogDataset) + assert type(ds) is YTHaloCatalogDataset for field in fields: f1 = data[field].in_base() @@ -83,7 +83,7 @@ def test_halo_catalog_boundary_particles(self): fn = fake_halo_catalog(data) ds = yt_load(fn) - assert isinstance(ds, YTHaloCatalogDataset) + assert type(ds) is YTHaloCatalogDataset for field in ["particle_mass"]: f1 = data[field].in_base() From 75bd8cd9c0109c05a254157d38050bd22d5fb9a6 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 13 Aug 2020 11:37:31 +0100 Subject: [PATCH 466/653] Allow redshift 0; stop dragging on the present day. --- yt/data_objects/static_output.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 6a3a405b6c1..0d183cd430d 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1116,7 +1116,7 @@ def set_units(self): if getattr(self, "cosmological_simulation", False): # this dataset is cosmological, so add cosmological units. self.unit_registry.modify("h", self.hubble_constant) - if getattr(self, "current_redshift", None): + if getattr(self, "current_redshift", None) is not None: # Comoving lengths for my_unit in ["m", "pc", "AU", "au"]: new_unit = f"{my_unit}cm" @@ -1157,7 +1157,7 @@ def setup_cosmology(self): w_a=w_a, ) - if getattr(self, "current_redshift", None): + if getattr(self, "current_redshift", None) is not None: self.critical_density = self.cosmology.critical_density( self.current_redshift ) From e318a8cfd3c95b73f72c71f651f07a0449a07cb9 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 13 Aug 2020 11:50:52 +0100 Subject: [PATCH 467/653] Cleanup black mess. 
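The nested dict()/list-comprehension constructions that black had formatted awkwardly are replaced with explicit loops; the two spellings build identical dictionaries, as this toy comparison shows (the data-file records are stand-ins, not real yt objects):

    data_files = [{"halos": 3}, {"halos": 5}]   # each maps ptype to a particle count
    ptypes = ["halos"]

    pc_old = dict([(ptype, sum([d[ptype] for d in data_files])) for ptype in ptypes])

    pc_new = {}
    for ptype in ptypes:
        d = [df[ptype] for df in data_files]
        pc_new.update({ptype: sum(d)})

    assert pc_old == pc_new == {"halos": 8}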
--- yt/frontends/halo_catalog/data_structures.py | 26 +++++++------------- 1 file changed, 9 insertions(+), 17 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index cb592bd1220..e82bd551cb6 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -203,28 +203,20 @@ def _calculate_particle_index_starts(self): particle_count[ptype] += data_file.total_particles[ptype] offset_count += getattr(data_file, "total_offset", 0) - self._halo_index_start = dict( - [ - ( - ptype, - np.array( - [data_file.index_start[ptype] for data_file in self.data_files] - ), - ) - for ptype in self.ds.particle_types_raw - ] - ) + self._halo_index_start = {} + for ptype in self.ds.particle_types_raw: + d = [data_file.index_start[ptype] for data_file in self.data_files] + self._halo_index_start.update({ptype: np.array(d)}) def _detect_output_fields(self): field_list = [] scalar_field_list = [] units = {} - pc = dict( - [ - (ptype, sum([d.total_particles[ptype] for d in self.data_files])) - for ptype in self.ds.particle_types_raw - ] - ) + + pc = {} + for ptype in self.ds.particle_types_raw: + d = [df.total_particles[ptype] for df in self.data_files] + pc.update({ptype: sum(d)}) found_fields = dict([(ptype, False) for ptype, pnum in pc.items() if pnum > 0]) has_ids = False From 70a8a497e79700330822d6bb05532666c901518d Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 13 Aug 2020 12:19:17 +0100 Subject: [PATCH 468/653] Add comment. --- yt/frontends/halo_catalog/data_structures.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index e82bd551cb6..0eb56812efd 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -240,6 +240,12 @@ def _detect_output_fields(self): ds.particle_types_raw = ds.particle_types def _get_halo_file_indices(self, ptype, identifiers): + """ + Get the index of the data file list where this halo lives. + + Digitize returns i such that bins[i-1] <= x < bins[i], so we subtract + one because we will open data file i. 
+ """ return np.digitize(identifiers, self._halo_index_start[ptype], right=False) - 1 def _get_halo_scalar_index(self, ptype, identifier): From d9f7b1d739c4cb2f4f7f00010230161fd51c9f1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 20 Jul 2020 10:35:49 +0200 Subject: [PATCH 469/653] cleanup: replace AssertionError raise statements where undue --- yt/geometry/grid_geometry_handler.py | 2 +- yt/geometry/tests/test_grid_container.py | 2 +- yt/visualization/plot_window.py | 20 +++++++++++++------- 3 files changed, 15 insertions(+), 9 deletions(-) diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 56a5c254f14..696ca7109cb 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -297,7 +297,7 @@ def _find_points(self, x, y, z): y = ensure_numpy_array(y) z = ensure_numpy_array(z) if not len(x) == len(y) == len(z): - raise AssertionError("Arrays of indices must be of the same size") + raise ValueError("Arrays of indices must be of the same size") grid_tree = self._get_grid_tree() pts = MatchPointsToGrids(grid_tree, len(x), x, y, z) diff --git a/yt/geometry/tests/test_grid_container.py b/yt/geometry/tests/test_grid_container.py index b9418831e19..3f0d4d7cf9e 100644 --- a/yt/geometry/tests/test_grid_container.py +++ b/yt/geometry/tests/test_grid_container.py @@ -133,7 +133,7 @@ def test_find_points(): assert_equal(point_grid_inds, grid_inds[ind]) # Test if find_points fails properly for non equal indices' array sizes - assert_raises(AssertionError, test_ds.index._find_points, [0], 1.0, [2, 3]) + assert_raises(ValueError, test_ds.index._find_points, [0], 1.0, [2, 3]) def test_grid_arrays_view(): diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 527a2f98905..3368a4ff0b7 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -2352,17 +2352,23 @@ def SlicePlot(ds, normal=None, fields=None, axis=None, *args, **kwargs): ... north_vector=[0.2,-0.3,0.1]) """ - # Make sure we are passed a normal - # we check the axis keyword for backwards compatibility - if normal is None: + if axis is not None: + issue_deprecation_warning( + "SlicePlot's argument 'axis' is a deprecated alias for 'normal', it " + "will be removed in a future version of yt." + ) + if normal is not None: + raise TypeError( + "SlicePlot() received incompatible arguments 'axis' and 'normal'" + ) normal = axis + + # to keep positional ordering we had to make 'normal' and 'fields' keywords if normal is None: - raise AssertionError("Must pass a normal vector to the slice!") + raise TypeError("Missing argument in SlicePlot(): 'normal'") - # to keep positional ordering we had to make fields a keyword; make sure - # it is present if fields is None: - raise AssertionError("Must pass field(s) to plot!") + raise TypeError("Missing argument in SlicePlot(): 'fields'") # use an AxisAlignedSlicePlot where possible, e.g.: # maybe someone passed normal=[0,0,0.2] when they should have just used "z" From f789c6999045df9d6bf58e33b5fda60cffe65eeb Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 13 Aug 2020 15:56:55 +0100 Subject: [PATCH 470/653] Last few comments. 
--- yt/frontends/halo_catalog/data_structures.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 0eb56812efd..52fae36e5c7 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -264,6 +264,7 @@ def _get_halo_values(self, ptype, identifiers, fields, f=None): data = defaultdict(lambda: np.empty(identifiers.size)) i_scalars = self._get_halo_file_indices(ptype, identifiers) for i_scalar in np.unique(i_scalars): + # mask array to get field data for this halo target = i_scalars == i_scalar scalar_indices = identifiers - self._halo_index_start[ptype][i_scalar] @@ -291,10 +292,10 @@ def _read_halo_particle_field(self, fh, ptype, field, indices): return fh[field][indices] def _read_particle_fields(self, fields, dobj, chunk=None): - if len(fields) == 0: + if not fields: return {}, [] fields_to_read, fields_to_generate = self._split_fields(fields) - if len(fields_to_read) == 0: + if not fields_to_read: return {}, fields_to_generate fields_to_return = self.io._read_particle_selection(dobj, fields_to_read) return fields_to_return, fields_to_generate @@ -383,6 +384,7 @@ def _set_code_unit_attributes(self): @classmethod def _is_valid(self, *args, **kwargs): + # We don't ever want this to be loaded by yt.load. return False From df02b3dab8c99acc090814cf9a3c144279aca0dd Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Thu, 13 Aug 2020 10:15:04 -0500 Subject: [PATCH 471/653] Avoid generating kdtree until it's really required. Fixes #2854 --- yt/frontends/arepo/io.py | 2 +- yt/frontends/gadget/io.py | 4 +++- yt/frontends/sph/data_structures.py | 2 +- yt/frontends/tipsy/io.py | 5 +++-- 4 files changed, 8 insertions(+), 5 deletions(-) diff --git a/yt/frontends/arepo/io.py b/yt/frontends/arepo/io.py index 1801024a080..9147e74e2ca 100644 --- a/yt/frontends/arepo/io.py +++ b/yt/frontends/arepo/io.py @@ -7,7 +7,7 @@ class IOHandlerArepoHDF5(IOHandlerGadgetHDF5): _dataset_type = "arepo_hdf5" - def _generate_smoothing_length(self, data_files, kdtree): + def _generate_smoothing_length(self, index): # This is handled below in _get_smoothing_length return diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 2808cfec7bd..500ed82f03a 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -87,7 +87,8 @@ def _yield_coordinates(self, data_file, needed_ptype=None): yield key, pos f.close() - def _generate_smoothing_length(self, data_files, kdtree): + def _generate_smoothing_length(self, index): + data_files = index.data_files if not self.ds.gen_hsmls: return hsml_fn = data_files[0].filename.replace(".hdf5", ".hsml.hdf5") @@ -116,6 +117,7 @@ def _generate_smoothing_length(self, data_files, kdtree): for fn, count in counts.items(): offsets[fn] = offset offset += count + kdtree = index.kdtree positions = uconcatenate(positions)[kdtree.idx] hsml = generate_smoothing_length(positions, kdtree, self.ds._num_neighbors) dtype = positions.dtype diff --git a/yt/frontends/sph/data_structures.py b/yt/frontends/sph/data_structures.py index 01a84c3d4d1..ca8f9166614 100644 --- a/yt/frontends/sph/data_structures.py +++ b/yt/frontends/sph/data_structures.py @@ -84,7 +84,7 @@ def _initialize_index(self): ds._file_hash = self._generate_hash() if hasattr(self.io, "_generate_smoothing_length"): - self.io._generate_smoothing_length(self.data_files, self.kdtree) + self.io._generate_smoothing_length(self) 
super(SPHParticleIndex, self)._initialize_index() diff --git a/yt/frontends/tipsy/io.py b/yt/frontends/tipsy/io.py index 7010c6813cd..6acb51ed3f4 100644 --- a/yt/frontends/tipsy/io.py +++ b/yt/frontends/tipsy/io.py @@ -117,7 +117,7 @@ def _read_particle_coords(self, chunks, ptf): def hsml_filename(self): return f"{self.ds.parameter_filename}-{'hsml'}" - def _generate_smoothing_length(self, data_files, kdtree): + def _generate_smoothing_length(self, index): if os.path.exists(self.hsml_filename): with open(self.hsml_filename, "rb") as f: file_hash = struct.unpack("q", f.read(struct.calcsize("q")))[0] @@ -126,13 +126,14 @@ def _generate_smoothing_length(self, data_files, kdtree): else: return positions = [] - for data_file in data_files: + for data_file in index.data_files: for _, ppos in self._yield_coordinates( data_file, needed_ptype=self.ds._sph_ptypes[0] ): positions.append(ppos) if positions == []: return + kdtree = index.kdtree positions = np.concatenate(positions)[kdtree.idx] hsml = generate_smoothing_length(positions, kdtree, self.ds._num_neighbors) hsml = hsml[np.argsort(kdtree.idx)] From 20ef4b3c11ad79ab80f30e043973c3967c3871d0 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Thu, 13 Aug 2020 11:00:25 -0500 Subject: [PATCH 472/653] Fix flake8 --- yt/data_objects/selection_objects/cut_region.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/data_objects/selection_objects/cut_region.py b/yt/data_objects/selection_objects/cut_region.py index 8399e95e844..679428fb20e 100644 --- a/yt/data_objects/selection_objects/cut_region.py +++ b/yt/data_objects/selection_objects/cut_region.py @@ -122,7 +122,8 @@ def _cond_ind(self): locals = self.locals.copy() if "obj" in locals: raise RuntimeError( - '"obj" has been defined in the "locals" ; this is not supported, please rename the variable.' + '"obj" has been defined in the "locals" ; ' + "this is not supported, please rename the variable." ) locals["obj"] = obj with obj._field_parameter_state(self.field_parameters): From 214a6da08779f29d5d9a46a31c10e88a01ae18ab Mon Sep 17 00:00:00 2001 From: Patrick Shriwise Date: Wed, 12 Aug 2020 06:24:06 -0500 Subject: [PATCH 473/653] Updating MOAB frontend files to use pymoab. 
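The old PyTAPS (itaps) bindings are replaced with pymoab. A minimal sketch of the access pattern the migration relies on, using only the pymoab calls that appear in this series; the pyne_mesh object (whose .mesh attribute is assumed to be a pymoab core instance) and the helper name are illustrative, not the actual frontend code:

    import numpy as np
    from pymoab import types

    def hex_mesh_arrays(pyne_mesh):
        # vertex coordinates come back as a flat xyz array -> reshape to (N, 3)
        verts = list(pyne_mesh.structured_iterate_vertex())
        coords = pyne_mesh.mesh.get_coords(verts).astype("float64")
        coords = coords.reshape(len(coords) // 3, 3)
        # hex connectivity: collect the vertex adjacencies of every hex element
        hexes = pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX)
        vind = []
        for h in hexes:
            adj = pyne_mesh.mesh.get_adjacencies(
                h, 0, create_if_missing=True, op_type=types.UNION
            )
            vind += list(adj)
        # eight vertex handles per hexahedron
        vind = np.asarray(vind, dtype=np.int64).reshape(-1, 8)
        return coords, vind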
--- yt/frontends/moab/data_structures.py | 28 +++++++++++++++------------- 1 file changed, 15 insertions(+), 13 deletions(-) diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index 6c12c925e52..e69bf455983 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -121,15 +121,20 @@ def __init__(self, ds, dataset_type="moab_hex8_pyne"): super(PyneMeshHex8Hierarchy, self).__init__(ds, dataset_type) def _initialize_mesh(self): - from itaps import iBase, iMesh + from pymoab import core, types ents = list(self.pyne_mesh.structured_iterate_vertex()) - coords = self.pyne_mesh.mesh.getVtxCoords(ents).astype("float64") - vind = self.pyne_mesh.mesh.rootSet.getAdjEntIndices( - iBase.Type.region, iMesh.Topology.hexahedron, iBase.Type.vertex - )[1].indices.data.astype("int64") - # Divide by float so it throws an error if it's not 8 - vind.shape = (vind.shape[0] / 8.0, 8) + coords = self.pyne_mesh.mesh.get_coords(ents).astype("float64") + hexes = self.pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX) + vind = [] + for h in hexes: + adj = self.pyne_mesh.mesh.get_adjacencies(h, + 0, + create_if_missing = True, + op_type=types.UNION) + vind += list(adj) + vind = np.asarray(vind) + vind.shape = (int(vind.shape[0] / 8), 8) self.meshes = [PyneHex8Mesh(0, self.index_filename, vind, coords, self)] def _detect_output_fields(self): @@ -174,13 +179,10 @@ def _set_code_unit_attributes(self): setdefaultattr(self, "mass_unit", self.quan(1.0, "g")) def _parse_parameter_file(self): - # not sure if this import has side-effects so I'm not deleting it - from itaps import iBase # NOQA - ents = list(self.pyne_mesh.structured_iterate_vertex()) - coords = self.pyne_mesh.mesh.getVtxCoords(ents) - self.domain_left_edge = coords[0] - self.domain_right_edge = coords[-1] + coords = self.pyne_mesh.mesh.get_coords(ents) + self.domain_left_edge = coords[0:3] + self.domain_right_edge = coords[-3:] self.domain_dimensions = self.domain_right_edge - self.domain_left_edge self.refine_by = 2 self.dimensionality = len(self.domain_dimensions) From 75bd3a32ab4c2911adb52e99a550ffe8bde616d0 Mon Sep 17 00:00:00 2001 From: Baptiste Mouginot Date: Thu, 13 Aug 2020 13:34:35 -0500 Subject: [PATCH 474/653] fix pyMOAB front end when calling PyNE MESH --- yt/frontends/moab/data_structures.py | 3 ++- yt/frontends/moab/io.py | 5 +++-- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index e69bf455983..828b203e71b 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -125,6 +125,7 @@ def _initialize_mesh(self): ents = list(self.pyne_mesh.structured_iterate_vertex()) coords = self.pyne_mesh.mesh.get_coords(ents).astype("float64") + coords = np.reshape(coords, (int(len(coords)/3), 3)) hexes = self.pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX) vind = [] for h in hexes: @@ -133,7 +134,7 @@ def _initialize_mesh(self): create_if_missing = True, op_type=types.UNION) vind += list(adj) - vind = np.asarray(vind) + vind = np.asarray(vind, dtype=np.int64) vind.shape = (int(vind.shape[0] / 8), 8) self.meshes = [PyneHex8Mesh(0, self.index_filename, vind, coords, self)] diff --git a/yt/frontends/moab/io.py b/yt/frontends/moab/io.py index 435bdc70adc..7d68f353546 100644 --- a/yt/frontends/moab/io.py +++ b/yt/frontends/moab/io.py @@ -61,8 +61,9 @@ def _read_fluid_selection(self, chunks, selector, fields, size): for field in fields: ftype, fname = field if 
pyne_mesh.structured: - tag = pyne_mesh.mesh.getTagHandle("idx") - indices = [tag[ent] for ent in pyne_mesh.structured_iterate_hex()] + tag = pyne_mesh.mesh.tag_get_handle("idx") + hex_list = [ent for ent in pyne_mesh.structured_iterate_hex()] + indices = pyne_mesh.mesh.tag_get_data(tag, hex_list).flatten() else: indices = slice(None) ds = np.asarray(getattr(pyne_mesh, fname)[indices], "float64") From db6b74dc92f5a3ccf955a66b6a917d51b7cc1de7 Mon Sep 17 00:00:00 2001 From: Baptiste Mouginot Date: Thu, 13 Aug 2020 13:40:49 -0500 Subject: [PATCH 475/653] fix syntax using black --- yt/frontends/moab/data_structures.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index 828b203e71b..63ba8da04bd 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -121,18 +121,17 @@ def __init__(self, ds, dataset_type="moab_hex8_pyne"): super(PyneMeshHex8Hierarchy, self).__init__(ds, dataset_type) def _initialize_mesh(self): - from pymoab import core, types + from pymoab import types ents = list(self.pyne_mesh.structured_iterate_vertex()) coords = self.pyne_mesh.mesh.get_coords(ents).astype("float64") - coords = np.reshape(coords, (int(len(coords)/3), 3)) + coords = np.reshape(coords, (int(len(coords) / 3), 3)) hexes = self.pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX) vind = [] for h in hexes: - adj = self.pyne_mesh.mesh.get_adjacencies(h, - 0, - create_if_missing = True, - op_type=types.UNION) + adj = self.pyne_mesh.mesh.get_adjacencies( + h, 0, create_if_missing=True, op_type=types.UNION + ) vind += list(adj) vind = np.asarray(vind, dtype=np.int64) vind.shape = (int(vind.shape[0] / 8), 8) From 87ce5b66a75344661772bb8143c43a045143fac8 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Thu, 13 Aug 2020 20:19:46 +0100 Subject: [PATCH 476/653] Convert data_files and total_particles into properties. 
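data_files and total_particles are no longer built eagerly: they become properties backed by private attributes, so the file list is only constructed on first access, while frontends that assemble their own list can still assign it directly. A stripped-down sketch of the pattern (class and file names are illustrative, not the real ParticleIndex):

    class LazyIndex:
        _data_files = None

        def _setup_filenames(self):
            # stand-in for the real per-frontend file discovery
            self._data_files = ["halos_0000.h5", "halos_0001.h5"]

        @property
        def data_files(self):
            if self._data_files is None:
                self._setup_filenames()
            return self._data_files

        @data_files.setter
        def data_files(self, value):
            self._data_files = value

total_particles follows the same recipe, summing d.total_particles over data_files the first time it is requested, which is why the per-frontend _setup_filenames overrides can drop that bookkeeping.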
--- yt/frontends/art/data_structures.py | 3 -- yt/frontends/gadget_fof/data_structures.py | 20 +++++-------- yt/frontends/halo_catalog/data_structures.py | 3 -- yt/frontends/owls_subfind/data_structures.py | 1 - yt/geometry/particle_geometry_handler.py | 30 +++++++++++++++++--- 5 files changed, 33 insertions(+), 24 deletions(-) diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index 980e20171c8..4c40f59335f 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -414,9 +414,6 @@ def _setup_filenames(self): df = cls(self.dataset, self.io, template % {"num": i}, fi) fi += 1 self.data_files.append(df) - self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files - ) class DarkMatterARTDataset(ARTDataset): diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index bc17b83ed6e..8e65b1b214d 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -93,22 +93,16 @@ def _detect_output_fields(self): ds.particle_types_raw = ds.particle_types def _setup_filenames(self): - if not hasattr(self, "data_files"): - template = self.ds.filename_template - ndoms = self.ds.file_count - cls = self.ds._file_class - self.data_files = [ - cls(self.ds, self.io, template % {"num": i}, i, frange=None) - for i in range(ndoms) - ] - if not hasattr(self, "total_particles"): - self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files - ) + template = self.ds.filename_template + ndoms = self.ds.file_count + cls = self.ds._file_class + self.data_files = [ + cls(self.ds, self.io, template % {"num": i}, i, frange=None) + for i in range(ndoms) + ] def _setup_data_io(self): super(GadgetFOFParticleIndex, self)._setup_data_io() - self._setup_filenames() self._calculate_particle_count() self._calculate_particle_index_starts() self._calculate_file_offset_map() diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 5984121ed3a..ed386707e15 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -31,9 +31,6 @@ def _setup_filenames(self): range=None, ) ] - self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files - ) class HaloCatalogFile(ParticleFile): diff --git a/yt/frontends/owls_subfind/data_structures.py b/yt/frontends/owls_subfind/data_structures.py index 7909feed510..7e4e27a891d 100644 --- a/yt/frontends/owls_subfind/data_structures.py +++ b/yt/frontends/owls_subfind/data_structures.py @@ -50,7 +50,6 @@ def _calculate_file_offset_map(self): def _detect_output_fields(self): # TODO: Add additional fields - self._setup_filenames() self._calculate_particle_index_starts() self._calculate_file_offset_map() dsl = [] diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 449d7bc7cd1..dd546ad4ff7 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -45,6 +45,32 @@ def _get_particle_type_counts(self): def convert(self, unit): return self.dataset.conversion_factors[unit] + _data_files = None + + @property + def data_files(self): + if self._data_files is not None: + return self._data_files + + self._setup_filenames() + return self._data_files + + @data_files.setter + def data_files(self, value): + self._data_files = value + + _total_particles = None + + @property + def 
total_particles(self): + if self._total_particles is not None: + return self._total_particles + + self._total_particles = sum( + sum(d.total_particles.values()) for d in self.data_files + ) + return self._total_particles + def _setup_filenames(self): template = self.dataset.filename_template ndoms = self.dataset.file_count @@ -62,9 +88,6 @@ def _setup_filenames(self): self.data_files.append(df) start = end end += CHUNKSIZE - self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files - ) def _initialize_index(self): ds = self.dataset @@ -233,7 +256,6 @@ def _initialize_refined_index(self): def _detect_output_fields(self): # TODO: Add additional fields - self._setup_filenames() dsl = [] units = {} pcounts = self._get_particle_type_counts() From 282b035acf8a8ddc73910c78c893897f17eaf83c Mon Sep 17 00:00:00 2001 From: Baptiste Mouginot <15145274+bam241@users.noreply.github.com> Date: Thu, 13 Aug 2020 16:10:24 -0500 Subject: [PATCH 477/653] Apply suggestions from code review MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/frontends/moab/data_structures.py | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index 63ba8da04bd..4b844ad36d5 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -125,16 +125,15 @@ def _initialize_mesh(self): ents = list(self.pyne_mesh.structured_iterate_vertex()) coords = self.pyne_mesh.mesh.get_coords(ents).astype("float64") - coords = np.reshape(coords, (int(len(coords) / 3), 3)) + coords = coords.reshape(len(coords) // 3, 3) hexes = self.pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX) vind = [] for h in hexes: - adj = self.pyne_mesh.mesh.get_adjacencies( + vind.append(self.pyne_mesh.mesh.get_adjacencies( h, 0, create_if_missing=True, op_type=types.UNION ) - vind += list(adj) vind = np.asarray(vind, dtype=np.int64) - vind.shape = (int(vind.shape[0] / 8), 8) + vind = vind.reshape(len(vind) // 8, 8) self.meshes = [PyneHex8Mesh(0, self.index_filename, vind, coords, self)] def _detect_output_fields(self): From 7f191e6ddb7f2a258d011828b79e31ae47b87516 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 13 Aug 2020 23:33:06 +0200 Subject: [PATCH 478/653] fix missing parenthesis --- yt/frontends/moab/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index 4b844ad36d5..a7ff7d6b016 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -131,7 +131,7 @@ def _initialize_mesh(self): for h in hexes: vind.append(self.pyne_mesh.mesh.get_adjacencies( h, 0, create_if_missing=True, op_type=types.UNION - ) + )) vind = np.asarray(vind, dtype=np.int64) vind = vind.reshape(len(vind) // 8, 8) self.meshes = [PyneHex8Mesh(0, self.index_filename, vind, coords, self)] From fe13b28d33f08e17c33441d3f574a9c00773cff8 Mon Sep 17 00:00:00 2001 From: yt-fido Date: Thu, 13 Aug 2020 21:35:30 +0000 Subject: [PATCH 479/653] [black-command] fixes --- doc/source/cookbook/multiplot_export_to_mpl.py | 11 +++++------ yt/frontends/moab/data_structures.py | 8 +++++--- 2 files changed, 10 insertions(+), 9 deletions(-) diff --git a/doc/source/cookbook/multiplot_export_to_mpl.py b/doc/source/cookbook/multiplot_export_to_mpl.py index 35c54c05cba..ff11dd68568 100644 --- 
a/doc/source/cookbook/multiplot_export_to_mpl.py +++ b/doc/source/cookbook/multiplot_export_to_mpl.py @@ -2,16 +2,15 @@ ds = yt.load_sample("IsolatedGalaxy") -fields = ['density', 'velocity_x', 'velocity_y', 'velocity_magnitude'] -p = yt.SlicePlot(ds, 'z', fields) -p.set_log('velocity_x', False) -p.set_log('velocity_y', False) +fields = ["density", "velocity_x", "velocity_y", "velocity_magnitude"] +p = yt.SlicePlot(ds, "z", fields) +p.set_log("velocity_x", False) +p.set_log("velocity_y", False) # this returns a matplotlib figure with an ImageGrid and the slices # added to the grid of axes (in this case, 2x2) -fig = p.export_to_mpl_figure((2,2)) +fig = p.export_to_mpl_figure((2, 2)) fig.tight_layout() fig.savefig("multiplot_export_to_mpl.png") - diff --git a/yt/frontends/moab/data_structures.py b/yt/frontends/moab/data_structures.py index a7ff7d6b016..4c20432954b 100644 --- a/yt/frontends/moab/data_structures.py +++ b/yt/frontends/moab/data_structures.py @@ -129,9 +129,11 @@ def _initialize_mesh(self): hexes = self.pyne_mesh.mesh.get_entities_by_type(0, types.MBHEX) vind = [] for h in hexes: - vind.append(self.pyne_mesh.mesh.get_adjacencies( - h, 0, create_if_missing=True, op_type=types.UNION - )) + vind.append( + self.pyne_mesh.mesh.get_adjacencies( + h, 0, create_if_missing=True, op_type=types.UNION + ) + ) vind = np.asarray(vind, dtype=np.int64) vind = vind.reshape(len(vind) // 8, 8) self.meshes = [PyneHex8Mesh(0, self.index_filename, vind, coords, self)] From 31d84817d8789e31dc11a9ac5abd2393b959d46c Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 14 Aug 2020 10:07:08 +0100 Subject: [PATCH 480/653] Merge that! --- yt/frontends/adaptahop/data_structures.py | 3 --- yt/frontends/halo_catalog/data_structures.py | 1 - 2 files changed, 4 deletions(-) diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 787f53e5091..e14de1aed15 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -39,9 +39,6 @@ def _setup_filenames(self): self.data_files = [ cls(self.dataset, self.io, self.dataset.parameter_filename, 0, None,) ] - self.total_particles = sum( - sum(d.total_particles.values()) for d in self.data_files - ) class AdaptaHOPDataset(Dataset): diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index b73b69afa56..52fae36e5c7 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -22,7 +22,6 @@ class HaloCatalogFile(ParticleFile): """ Base class for data files of halo catalog datasets. ->>>>>>> master This is mainly here to correct for periodicity when reading particle positions. From 769a4ccc2d9e411d65f73415b4986ee8a107a79c Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 14 Aug 2020 11:12:15 +0100 Subject: [PATCH 481/653] Remove the hasattr check that I added in another PR. 
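The guard being removed lived in _setup_filenames; now that data_files is a lazily evaluated property, the getter only calls _setup_filenames while the backing attribute is still None, and hasattr(self, "data_files") is in any case no longer a cheap presence check, since hasattr on a property invokes the getter. A tiny illustration of that last point (illustrative class, not yt code):

    class Demo:
        @property
        def data_files(self):
            print("getter ran")
            return []

    hasattr(Demo(), "data_files")  # prints "getter ran"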
--- yt/geometry/particle_geometry_handler.py | 3 --- 1 file changed, 3 deletions(-) diff --git a/yt/geometry/particle_geometry_handler.py b/yt/geometry/particle_geometry_handler.py index 574979d65af..dd546ad4ff7 100644 --- a/yt/geometry/particle_geometry_handler.py +++ b/yt/geometry/particle_geometry_handler.py @@ -72,9 +72,6 @@ def total_particles(self): return self._total_particles def _setup_filenames(self): - if hasattr(self, "data_files"): - return - template = self.dataset.filename_template ndoms = self.dataset.file_count cls = self.dataset._file_class From 715ddf15eebebf032b1d46977888a39f90b9ce3d Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 14 Aug 2020 11:32:50 +0100 Subject: [PATCH 482/653] Split a long line. --- yt/data_objects/octree_subset.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 58a9e6a7ac2..600870a5b31 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -604,7 +604,8 @@ def octree_subset_with_gz(self): def get_vertex_centered_data(self, fields, smoothed=False, no_ghost=False): if no_ghost is True: raise NotImplementedError( - "get_vertex_centered_data without ghost zones for oct-based datasets has not been implemented." + "get_vertex_centered_data without ghost zones for " + "oct-based datasets has not been implemented." ) # Make sure the field list has only unique entries From 54feb5eed6bac57be4988ec08774dc8af3572d7f Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 14 Aug 2020 14:19:46 +0100 Subject: [PATCH 483/653] Switch order of ActiveDimensions. --- yt/visualization/fixed_resolution.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/visualization/fixed_resolution.py b/yt/visualization/fixed_resolution.py index 256e4b3df3e..750c7eb9b2f 100644 --- a/yt/visualization/fixed_resolution.py +++ b/yt/visualization/fixed_resolution.py @@ -534,7 +534,8 @@ def save_as_dataset(self, filename=None, fields=None): extra_attrs["con_args"] = self.data_source._con_args extra_attrs["left_edge"] = self.ds.arr([self.bounds[0], self.bounds[2]]) extra_attrs["right_edge"] = self.ds.arr([self.bounds[1], self.bounds[3]]) - extra_attrs["ActiveDimensions"] = self.buff_size + # The data dimensions are [NY, NX] but buff_size is [NX, NY]. + extra_attrs["ActiveDimensions"] = self.buff_size[::-1] extra_attrs["level"] = 0 extra_attrs["data_type"] = "yt_frb" extra_attrs["container_type"] = self.data_source._type_name From 85cd15f817b5f4286b255923d56014831cc0c851 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 14 Aug 2020 14:31:12 +0100 Subject: [PATCH 484/653] Add test. 
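The test exercises the ActiveDimensions ordering fixed just above: the buffered field data comes back with shape (NY, NX) while buff_size is (NX, NY), which is why the test transposes the raw z-slice before comparing. A sketch of the shape bookkeeping, using the same illustrative 8x9x10 grid as the test:

    import numpy as np

    arr = np.arange(8.0 * 9.0 * 10.0).reshape((8, 9, 10))  # (nx, ny, nz) grid data
    zslice = arr[:, :, 5]                                   # shape (nx, ny) == (8, 9)
    image = zslice.T                                        # FRB layout: (ny, nx) == (9, 8)
    assert image.shape == (9, 8)                            # hence ActiveDimensions = buff_size[::-1]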
--- yt/frontends/ytdata/tests/test_unit.py | 55 +++++++++++++++++++++++++- 1 file changed, 54 insertions(+), 1 deletion(-) diff --git a/yt/frontends/ytdata/tests/test_unit.py b/yt/frontends/ytdata/tests/test_unit.py index c59ed6f8603..4de579f3874 100644 --- a/yt/frontends/ytdata/tests/test_unit.py +++ b/yt/frontends/ytdata/tests/test_unit.py @@ -2,8 +2,17 @@ import shutil import tempfile +import numpy as np + from yt.convenience import load -from yt.testing import assert_fname, fake_random_ds, requires_file, requires_module +from yt.frontends.stream.data_structures import load_uniform_grid +from yt.testing import ( + assert_array_equal, + assert_fname, + fake_random_ds, + requires_file, + requires_module, +) from yt.utilities.answer_testing.framework import data_dir_load from yt.visualization.plot_window import ProjectionPlot, SlicePlot @@ -75,3 +84,47 @@ def test_plot_data(): os.chdir(curdir) if tmpdir != ".": shutil.rmtree(tmpdir) + + +@requires_module("h5py") +def test_non_square_frb(): + tmpdir = tempfile.mkdtemp() + curdir = os.getcwd() + os.chdir(tmpdir) + + # construct an arbitrary dataset + arr = np.arange(8.0 * 9.0 * 10.0).reshape((8, 9, 10)) + data = dict(density=(arr, "g/cm**3")) + bbox = np.array([[-4, 4.0], [-4.5, 4.5], [-5.0, 5]]) + ds = load_uniform_grid( + data, arr.shape, length_unit="Mpc", bbox=bbox, periodicity=(False, False, False) + ) + + # make a slice + slc = ds.slice(axis="z", coord=ds.quan(0.0, "code_length")) + # make a frb and save it to disk + center = (ds.quan(0.0, "code_length"), ds.quan(0.0, "code_length")) + xax, yax = ds.coordinates.x_axis[slc.axis], ds.coordinates.y_axis[slc.axis] + res = [ds.domain_dimensions[xax], ds.domain_dimensions[yax]] # = [8,9] + width = ds.domain_right_edge[xax] - ds.domain_left_edge[xax] # = 8 code_length + height = ds.domain_right_edge[yax] - ds.domain_left_edge[yax] # = 9 code_length + frb = slc.to_frb(width=width, height=height, resolution=res, center=center) + fname = "test_frb_roundtrip.h5" + frb.save_as_dataset(fname, fields=["density"]) + + expected_vals = arr[:, :, 5].T + print( + "\nConfirmation that initial frb results are expected:", + (expected_vals == frb["density"].v).all(), + "\n", + ) + + # yt-reload: + reloaded_ds = load(fname) + + assert_array_equal(frb["density"].shape, reloaded_ds.data["density"].shape) + assert_array_equal(frb["density"], reloaded_ds.data["density"]) + + os.chdir(curdir) + if tmpdir != ".": + shutil.rmtree(tmpdir) From 9fc15ad8a59c1e391268d31f8959558e3562d141 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 8 Aug 2020 19:48:46 +0200 Subject: [PATCH 485/653] gather all load_* functions in a common namespace revert __init__ changes --- setup.cfg | 1 + yt/__init__.py | 2 +- yt/loaders.py | 18 ++++++++++++++++++ 3 files changed, 20 insertions(+), 1 deletion(-) create mode 100644 yt/loaders.py diff --git a/setup.cfg b/setup.cfg index 86f0b95fe09..8ae6f8cc87d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,6 +14,7 @@ max-line-length=88 exclude = doc, benchmarks, */api.py, # avoid spurious "unused import" + yt/loaders.py, # avoid spurious "unused import" */__init__.py, # avoid spurious "unused import" */__config__.py, # autogenerated yt/extern, # vendored libraries diff --git a/yt/__init__.py b/yt/__init__.py index f0adf4ebfb5..9c69e4fb576 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -164,7 +164,7 @@ communication_system, ) -from yt.convenience import load, simulation +from yt.convenience import load, load_simulation, simulation from yt.utilities.load_sample import 
load_sample diff --git a/yt/loaders.py b/yt/loaders.py new file mode 100644 index 00000000000..2372e15e0f0 --- /dev/null +++ b/yt/loaders.py @@ -0,0 +1,18 @@ +""" +This module gathers all user-facing functions with a `load_` prefix. + +""" +# note: in the future, functions could be moved here instead +# in which case, this file should be removed from flake8 ignore list in setup.cfg + +# note: simulation() should be renamed load_simulation() +from .convenience import load, simulation +from .frontends.stream.api import ( + load_amr_grids, + load_hexahedral_mesh, + load_octree, + load_particles, + load_uniform_grid, + load_unstructured_mesh, +) +from .utilities import load_sample From 206226744a540d18fcd82f54e24885ce15d278e5 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 8 Aug 2020 20:16:52 +0200 Subject: [PATCH 486/653] deprecate yt.simulation in favor of yt.load_simulation --- yt/convenience.py | 14 +++++++++++++- yt/frontends/enzo/simulation_handling.py | 4 ++-- yt/frontends/exodus_ii/simulation_handling.py | 2 +- yt/frontends/gadget/simulation_handling.py | 4 ++-- yt/frontends/owls/simulation_handling.py | 2 +- yt/loaders.py | 3 +-- yt/tests/test_load_errors.py | 13 ++++++++----- yt/utilities/answer_testing/framework.py | 8 +++++--- yt/utilities/answer_testing/utils.py | 4 ++-- yt/visualization/profile_plotter.py | 6 +++--- 10 files changed, 38 insertions(+), 22 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index c7d89cddb63..3c68fcf9a2f 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -86,7 +86,7 @@ def load(fn, *args, **kwargs): raise YTOutputNotIdentified(fn, args, kwargs) -def simulation(fn, simulation_type, find_outputs=False): +def load_simulation(fn, simulation_type, find_outputs=False): """ Load a simulation time series object of the specified simulation type. @@ -123,3 +123,15 @@ def simulation(fn, simulation_type, find_outputs=False): raise YTSimulationNotIdentified(simulation_type) return cls(fn, find_outputs=find_outputs) + + +def simulation(fn, simulation_type, find_outputs=False): + from yt.funcs import issue_deprecation_warning + + issue_deprecation_warning( + "yt.simulation is a deprecated alias for yt.load_simulation" + "and will be removed in a future version of yt." + ) + return load_simulation( + fn=fn, simulation_type=simulation_type, find_outputs=find_outputs + ) diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 4b25514b8aa..415009c7dfd 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -43,7 +43,7 @@ class EnzoSimulation(SimulationTimeSeries): Examples -------- >>> import yt - >>> es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") + >>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") >>> es.get_time_series() >>> for ds in es: ... 
print(ds.current_time) @@ -205,7 +205,7 @@ def get_time_series( -------- >>> import yt - >>> es = yt.simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") + >>> es = yt.load_simulation("enzo_tiny_cosmology/32Mpc_32.enzo", "Enzo") >>> es.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr"), redshift_data=False) >>> for ds in es: diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index f0e9e9ea9be..137863e4c10 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -21,7 +21,7 @@ class ExodusIISimulation(DatasetSeries): Examples -------- >>> import yt - >>> sim = yt.simulation("demo_second", "ExodusII") + >>> sim = yt.load_simulation("demo_second", "ExodusII") >>> sim.get_time_series() >>> for ds in sim: ... print(ds.current_time) diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 69f2f5cbe54..13f6ccf97bd 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -42,7 +42,7 @@ class GadgetSimulation(SimulationTimeSeries): Examples -------- >>> import yt - >>> gs = yt.simulation("my_simulation.par", "Gadget") + >>> gs = yt.load_simulation("my_simulation.par", "Gadget") >>> gs.get_time_series() >>> for ds in gs: ... print(ds.current_time) @@ -190,7 +190,7 @@ def get_time_series( -------- >>> import yt - >>> gs = yt.simulation("my_simulation.par", "Gadget") + >>> gs = yt.load_simulation("my_simulation.par", "Gadget") >>> gs.get_time_series(initial_redshift=10, final_time=(13.7, "Gyr")) diff --git a/yt/frontends/owls/simulation_handling.py b/yt/frontends/owls/simulation_handling.py index ecc1b1abe92..78d3dc40ee2 100644 --- a/yt/frontends/owls/simulation_handling.py +++ b/yt/frontends/owls/simulation_handling.py @@ -26,7 +26,7 @@ class OWLSSimulation(GadgetSimulation): Examples -------- >>> import yt - >>> es = yt.simulation("my_simulation.par", "OWLS") + >>> es = yt.load_simulation("my_simulation.par", "OWLS") >>> es.get_time_series() >>> for ds in es: ... 
print(ds.current_time) diff --git a/yt/loaders.py b/yt/loaders.py index 2372e15e0f0..e983d4cfe25 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -5,8 +5,7 @@ # note: in the future, functions could be moved here instead # in which case, this file should be removed from flake8 ignore list in setup.cfg -# note: simulation() should be renamed load_simulation() -from .convenience import load, simulation +from .convenience import load, load_simulation from .frontends.stream.api import ( load_amr_grids, load_hexahedral_mesh, diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index 25bcaac2bf5..a874e141a30 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -2,8 +2,8 @@ import tempfile from pathlib import Path -from yt.convenience import load, simulation from yt.data_objects.static_output import Dataset +from yt.loaders import load, load_simulation from yt.testing import assert_raises from yt.utilities.exceptions import ( YTAmbiguousDataType, @@ -17,7 +17,10 @@ def test_load_nonexistent_data(): with tempfile.TemporaryDirectory() as tmpdir: assert_raises(FileNotFoundError, load, os.path.join(tmpdir, "not_a_file")) assert_raises( - FileNotFoundError, simulation, os.path.join(tmpdir, "not_a_file"), "Enzo" + FileNotFoundError, + load_simulation, + os.path.join(tmpdir, "not_a_file"), + "Enzo", ) # this one is a design choice: @@ -26,7 +29,7 @@ def test_load_nonexistent_data(): # so we make sure the error raised is not YTSimulationNotIdentified assert_raises( FileNotFoundError, - simulation, + load_simulation, os.path.join(tmpdir, "not_a_file"), "unregistered_simulation_type", ) @@ -40,13 +43,13 @@ def test_load_unidentified_data(): assert_raises(YTOutputNotIdentified, load, empty_file_path) assert_raises( YTSimulationNotIdentified, - simulation, + load_simulation, tmpdir, "unregistered_simulation_type", ) assert_raises( YTSimulationNotIdentified, - simulation, + load_simulation, empty_file_path, "unregistered_simulation_type", ) diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 552bcaa1fc1..fa9cc896e5d 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -22,10 +22,10 @@ from nose.plugins import Plugin from yt.config import ytcfg -from yt.convenience import load, simulation from yt.data_objects.static_output import Dataset from yt.data_objects.time_series import SimulationTimeSeries from yt.funcs import get_pbar +from yt.loaders import load, load_simulation from yt.testing import ( assert_allclose_units, assert_almost_equal, @@ -323,7 +323,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): if file_check: return os.path.isfile(os.path.join(path, sim_fn)) and result_storage is not None try: - simulation(sim_fn, sim_type) + load_simulation(sim_fn, sim_type) except FileNotFoundError: if ytcfg.getboolean("yt", "requires_ds_strict"): if result_storage is not None: @@ -354,7 +354,9 @@ def sim_dir_load(sim_fn, path=None, sim_type="Enzo", find_outputs=False): raise IOError if os.path.exists(sim_fn) or not path: path = "." 
- return simulation(os.path.join(path, sim_fn), sim_type, find_outputs=find_outputs) + return load_simulation( + os.path.join(path, sim_fn), sim_type, find_outputs=find_outputs + ) class AnswerTestingTest: diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index 7109783ba0b..02ceaef19ed 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -13,10 +13,10 @@ import yaml from yt.config import ytcfg -from yt.convenience import load, simulation from yt.data_objects.selection_data_containers import YTRegion from yt.data_objects.static_output import Dataset from yt.frontends.ytdata.api import save_as_dataset +from yt.loaders import load, load_simulation from yt.units.yt_array import YTArray, YTQuantity from yt.visualization import particle_plots, plot_window as pw, profile_plotter from yt.visualization.volume_rendering.scene import Scene @@ -322,7 +322,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): if file_check: return os.path.isfile(os.path.join(path, sim_fn)) try: - simulation(sim_fn, sim_type) + load_simulation(sim_fn, sim_type) except FileNotFoundError: return False return True diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index 35936486d1f..e56aee7fd01 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -193,7 +193,7 @@ class ProfilePlot: This creates profiles from a time series object. - >>> es = yt.simulation("AMRCosmology.enzo", "Enzo") + >>> es = yt.load_simulation("AMRCosmology.enzo", "Enzo") >>> es.get_time_series() >>> profiles = [] @@ -463,8 +463,8 @@ def from_profiles(cls, profiles, labels=None, plot_specs=None, y_log=None): Examples -------- - >>> from yt import simulation - >>> es = simulation("AMRCosmology.enzo", "Enzo") + >>> from yt import load_simulation + >>> es = load_simulation("AMRCosmology.enzo", "Enzo") >>> es.get_time_series() >>> profiles = [] From c8c521ad347cce6a0938620b3c1e1c7ca335afa7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 9 Aug 2020 22:34:42 +0200 Subject: [PATCH 487/653] fix an import --- yt/loaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/loaders.py b/yt/loaders.py index e983d4cfe25..5315668520c 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -14,4 +14,4 @@ load_uniform_grid, load_unstructured_mesh, ) -from .utilities import load_sample +from .utilities.load_sample import load_sample From 4439e9ed0151ad9c272d832a297b4290810a01de Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 9 Aug 2020 09:26:29 +0200 Subject: [PATCH 488/653] internally import from yt.loaders --- yt/__init__.py | 26 ++++++++++--------- .../construction_data_containers.py | 2 +- .../level_sets/tests/test_clump_finding.py | 3 +-- yt/data_objects/tests/test_covering_grid.py | 2 +- yt/data_objects/tests/test_extract_regions.py | 2 +- yt/data_objects/tests/test_io_geometry.py | 2 +- yt/data_objects/time_series.py | 2 +- yt/fields/tests/test_magnetic_fields.py | 2 +- yt/frontends/artio/tests/test_outputs.py | 2 +- yt/frontends/athena/tests/test_outputs.py | 2 +- yt/frontends/athena_pp/tests/test_outputs.py | 2 +- yt/frontends/enzo/answer_testing_support.py | 2 +- yt/frontends/enzo/simulation_handling.py | 2 +- yt/frontends/exodus_ii/simulation_handling.py | 2 +- yt/frontends/gadget/simulation_handling.py | 2 +- .../halo_catalog/tests/test_outputs.py | 12 +++------ yt/frontends/open_pmd/tests/test_outputs.py | 2 +- 
yt/frontends/stream/api.py | 7 ++++- .../stream/tests/test_stream_hexahedral.py | 2 +- yt/frontends/ytdata/tests/test_outputs.py | 2 +- yt/frontends/ytdata/tests/test_unit.py | 2 +- yt/geometry/tests/test_grid_container.py | 2 +- yt/geometry/tests/test_particle_deposit.py | 2 +- yt/testing.py | 18 ++++++------- yt/units/tests/test_magnetic_code_units.py | 2 +- yt/utilities/command_line.py | 2 +- .../grid_data_format/tests/test_writer.py | 2 +- yt/utilities/load_sample.py | 2 +- yt/utilities/metadata.py | 2 +- yt/utilities/parallel_tools/io_runner.py | 2 +- yt/utilities/tests/test_particle_generator.py | 2 +- yt/visualization/fixed_resolution.py | 2 +- yt/visualization/tests/test_callbacks.py | 2 +- yt/visualization/tests/test_fits_image.py | 2 +- yt/visualization/tests/test_particle_plot.py | 2 +- yt/visualization/tests/test_plotwindow.py | 2 +- 36 files changed, 64 insertions(+), 64 deletions(-) diff --git a/yt/__init__.py b/yt/__init__.py index 9c69e4fb576..c52df4752a7 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -94,15 +94,8 @@ frontends = _frontend_container() -from yt.frontends.stream.api import ( - load_uniform_grid, - load_amr_grids, - load_particles, - load_hexahedral_mesh, - load_octree, - hexahedral_connectivity, - load_unstructured_mesh, -) +from yt.frontends.stream.api import hexahedral_connectivity + from yt.frontends.ytdata.api import save_as_dataset @@ -164,9 +157,18 @@ communication_system, ) -from yt.convenience import load, load_simulation, simulation - -from yt.utilities.load_sample import load_sample +from yt.loaders import ( + load, + load_simulation, + simulation, # deprecated alias for load_simulation + load_uniform_grid, + load_amr_grids, + load_particles, + load_hexahedral_mesh, + load_octree, + load_unstructured_mesh, + load_sample, +) from yt.testing import run_nose diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index c5798015f9a..de2ff01f4ee 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -19,10 +19,10 @@ from yt.extern.tqdm import tqdm from yt.fields.field_exceptions import NeedsGridType, NeedsOriginalGrid from yt.frontends.sph.data_structures import ParticleDataset -from yt.frontends.stream.api import load_uniform_grid from yt.funcs import ensure_list, get_memory_usage, iterable, mylog, only_on_root from yt.geometry import particle_deposit as particle_deposit from yt.geometry.coordinates.cartesian_coordinates import all_data +from yt.loaders import load_uniform_grid from yt.units.unit_object import Unit from yt.units.yt_array import YTArray, uconcatenate from yt.utilities.exceptions import ( diff --git a/yt/data_objects/level_sets/tests/test_clump_finding.py b/yt/data_objects/level_sets/tests/test_clump_finding.py index a2f4b756cd8..dde452f2380 100644 --- a/yt/data_objects/level_sets/tests/test_clump_finding.py +++ b/yt/data_objects/level_sets/tests/test_clump_finding.py @@ -4,11 +4,10 @@ import numpy as np -from yt.convenience import load from yt.data_objects.level_sets.api import Clump, add_clump_info, find_clumps from yt.data_objects.level_sets.clump_info_items import clump_info_registry from yt.fields.derived_field import ValidateParameter -from yt.frontends.stream.api import load_uniform_grid +from yt.loaders import load, load_uniform_grid from yt.testing import assert_array_equal, assert_equal, requires_file from yt.utilities.answer_testing.framework import data_dir_load diff --git 
a/yt/data_objects/tests/test_covering_grid.py b/yt/data_objects/tests/test_covering_grid.py index 24bee6adf40..4e8489aab31 100644 --- a/yt/data_objects/tests/test_covering_grid.py +++ b/yt/data_objects/tests/test_covering_grid.py @@ -1,8 +1,8 @@ import numpy as np -from yt.convenience import load from yt.fields.derived_field import ValidateParameter from yt.frontends.stream.data_structures import load_particles +from yt.loaders import load from yt.testing import ( assert_almost_equal, assert_array_equal, diff --git a/yt/data_objects/tests/test_extract_regions.py b/yt/data_objects/tests/test_extract_regions.py index 66b64d4d209..b3d813b842b 100644 --- a/yt/data_objects/tests/test_extract_regions.py +++ b/yt/data_objects/tests/test_extract_regions.py @@ -1,6 +1,6 @@ import numpy as np -from yt.convenience import load +from yt.loaders import load from yt.testing import ( assert_almost_equal, assert_equal, diff --git a/yt/data_objects/tests/test_io_geometry.py b/yt/data_objects/tests/test_io_geometry.py index 4716276ab70..35d071f310d 100644 --- a/yt/data_objects/tests/test_io_geometry.py +++ b/yt/data_objects/tests/test_io_geometry.py @@ -3,9 +3,9 @@ import numpy as np -from yt.convenience import load from yt.frontends.ytdata.api import save_as_dataset from yt.frontends.ytdata.data_structures import YTDataContainerDataset +from yt.loaders import load from yt.testing import fake_amr_ds, requires_module from yt.units import YTQuantity diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 6ab907d3b44..55046145542 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -8,10 +8,10 @@ import numpy as np from yt.config import ytcfg -from yt.convenience import load from yt.data_objects.analyzer_objects import AnalysisTask, create_quantity_proxy from yt.data_objects.particle_trajectories import ParticleTrajectories from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog +from yt.loaders import load from yt.units.yt_array import YTArray, YTQuantity from yt.utilities.exceptions import YTException, YTOutputNotIdentified from yt.utilities.object_registries import ( diff --git a/yt/fields/tests/test_magnetic_fields.py b/yt/fields/tests/test_magnetic_fields.py index c2cb817bdf7..5f43f36e321 100644 --- a/yt/fields/tests/test_magnetic_fields.py +++ b/yt/fields/tests/test_magnetic_fields.py @@ -1,6 +1,6 @@ import numpy as np -from yt.frontends.stream.api import load_uniform_grid +from yt.loaders import load_uniform_grid from yt.testing import assert_almost_equal from yt.utilities.physical_constants import mu_0 diff --git a/yt/frontends/artio/tests/test_outputs.py b/yt/frontends/artio/tests/test_outputs.py index cf48b9c5937..2e80b714f6b 100644 --- a/yt/frontends/artio/tests/test_outputs.py +++ b/yt/frontends/artio/tests/test_outputs.py @@ -1,5 +1,5 @@ -from yt.convenience import load from yt.frontends.artio.api import ARTIODataset +from yt.loaders import load from yt.testing import ( assert_allclose_units, assert_equal, diff --git a/yt/frontends/athena/tests/test_outputs.py b/yt/frontends/athena/tests/test_outputs.py index f0375cf6f50..8798b7e6b3d 100644 --- a/yt/frontends/athena/tests/test_outputs.py +++ b/yt/frontends/athena/tests/test_outputs.py @@ -1,6 +1,6 @@ import yt.units as u -from yt.convenience import load from yt.frontends.athena.api import AthenaDataset +from yt.loaders import load from yt.testing import ( assert_allclose_units, assert_equal, diff --git a/yt/frontends/athena_pp/tests/test_outputs.py 
b/yt/frontends/athena_pp/tests/test_outputs.py index b304e386024..d289ad91848 100644 --- a/yt/frontends/athena_pp/tests/test_outputs.py +++ b/yt/frontends/athena_pp/tests/test_outputs.py @@ -1,7 +1,7 @@ import numpy as np -from yt.convenience import load from yt.frontends.athena_pp.api import AthenaPPDataset +from yt.loaders import load from yt.testing import ( assert_allclose, assert_equal, diff --git a/yt/frontends/enzo/answer_testing_support.py b/yt/frontends/enzo/answer_testing_support.py index 2f2552a0c35..b83ae8b6f46 100644 --- a/yt/frontends/enzo/answer_testing_support.py +++ b/yt/frontends/enzo/answer_testing_support.py @@ -4,7 +4,7 @@ import numpy as np from yt.config import ytcfg -from yt.convenience import load +from yt.loaders import load from yt.testing import assert_allclose from yt.utilities.answer_testing.framework import ( AnswerTestingTest, diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 415009c7dfd..0408239e7eb 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -5,9 +5,9 @@ from unyt import dimensions, unyt_array from unyt.unit_registry import UnitRegistry -from yt.convenience import load from yt.data_objects.time_series import DatasetSeries, SimulationTimeSeries from yt.funcs import only_on_root +from yt.loaders import load from yt.utilities.cosmology import Cosmology from yt.utilities.exceptions import ( InvalidSimulationTimeSeries, diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index 137863e4c10..f751fef8902 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -1,8 +1,8 @@ import glob -from yt.convenience import load from yt.data_objects.time_series import DatasetSeries from yt.funcs import only_on_root +from yt.loaders import load from yt.utilities.exceptions import YTOutputNotIdentified from yt.utilities.logger import ytLogger as mylog from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 13f6ccf97bd..81ab256c371 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -5,9 +5,9 @@ from unyt import dimensions, unyt_array from unyt.unit_registry import UnitRegistry -from yt.convenience import load from yt.data_objects.time_series import DatasetSeries, SimulationTimeSeries from yt.funcs import only_on_root +from yt.loaders import load from yt.utilities.cosmology import Cosmology from yt.utilities.exceptions import ( InvalidSimulationTimeSeries, diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index c371a5a0491..b2e033a6880 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -1,15 +1,9 @@ import numpy as np -from yt.convenience import load as yt_load -from yt.frontends.halo_catalog.data_structures import YTHaloCatalogDataset +from yt.frontends.halo_catalog.data_structures import HaloCatalogDataset from yt.frontends.ytdata.utilities import save_as_dataset -from yt.testing import ( - TempDirTest, - assert_array_equal, - assert_equal, - requires_file, - requires_module, -) +from yt.loaders import load as yt_load +from yt.testing import TempDirTest, assert_array_equal, requires_module from yt.units.yt_array import YTArray, YTQuantity 
from yt.utilities.answer_testing.framework import data_dir_load diff --git a/yt/frontends/open_pmd/tests/test_outputs.py b/yt/frontends/open_pmd/tests/test_outputs.py index a871d6f00e5..2840fd07b7f 100644 --- a/yt/frontends/open_pmd/tests/test_outputs.py +++ b/yt/frontends/open_pmd/tests/test_outputs.py @@ -2,8 +2,8 @@ import numpy as np -from yt.convenience import load from yt.frontends.open_pmd.data_structures import OpenPMDDataset +from yt.loaders import load from yt.testing import ( assert_almost_equal, assert_array_equal, diff --git a/yt/frontends/stream/api.py b/yt/frontends/stream/api.py index c4441a726f8..e029d9aad3e 100644 --- a/yt/frontends/stream/api.py +++ b/yt/frontends/stream/api.py @@ -5,13 +5,18 @@ StreamHandler, StreamHierarchy, hexahedral_connectivity, + refine_amr, +) + +""" +from yt.loaders import ( load_amr_grids, load_hexahedral_mesh, load_octree, load_particles, load_uniform_grid, load_unstructured_mesh, - refine_amr, ) +""" from .fields import StreamFieldInfo from .io import IOHandlerStream diff --git a/yt/frontends/stream/tests/test_stream_hexahedral.py b/yt/frontends/stream/tests/test_stream_hexahedral.py index 5fcdba8dc08..873b6d504ef 100644 --- a/yt/frontends/stream/tests/test_stream_hexahedral.py +++ b/yt/frontends/stream/tests/test_stream_hexahedral.py @@ -1,8 +1,8 @@ import numpy as np from yt import SlicePlot -from yt.frontends.stream.api import load_hexahedral_mesh from yt.frontends.stream.data_structures import hexahedral_connectivity +from yt.loaders import load_hexahedral_mesh from yt.testing import assert_almost_equal, assert_equal # Field information diff --git a/yt/frontends/ytdata/tests/test_outputs.py b/yt/frontends/ytdata/tests/test_outputs.py index f58db17f094..5d406433e5b 100644 --- a/yt/frontends/ytdata/tests/test_outputs.py +++ b/yt/frontends/ytdata/tests/test_outputs.py @@ -4,7 +4,6 @@ import numpy as np -from yt.convenience import load from yt.data_objects.api import create_profile from yt.frontends.ytdata.api import ( YTDataContainerDataset, @@ -14,6 +13,7 @@ YTSpatialPlotDataset, save_as_dataset, ) +from yt.loaders import load from yt.testing import assert_allclose_units, assert_array_equal, assert_equal from yt.units.yt_array import YTArray, YTQuantity from yt.utilities.answer_testing.framework import ( diff --git a/yt/frontends/ytdata/tests/test_unit.py b/yt/frontends/ytdata/tests/test_unit.py index c59ed6f8603..bd45f3b38ce 100644 --- a/yt/frontends/ytdata/tests/test_unit.py +++ b/yt/frontends/ytdata/tests/test_unit.py @@ -2,7 +2,7 @@ import shutil import tempfile -from yt.convenience import load +from yt.loaders import load from yt.testing import assert_fname, fake_random_ds, requires_file, requires_module from yt.utilities.answer_testing.framework import data_dir_load from yt.visualization.plot_window import ProjectionPlot, SlicePlot diff --git a/yt/geometry/tests/test_grid_container.py b/yt/geometry/tests/test_grid_container.py index b9418831e19..b002dde8a15 100644 --- a/yt/geometry/tests/test_grid_container.py +++ b/yt/geometry/tests/test_grid_container.py @@ -2,7 +2,7 @@ import numpy as np -from yt.frontends.stream.api import load_amr_grids +from yt.loaders import load_amr_grids from yt.testing import assert_equal, assert_raises diff --git a/yt/geometry/tests/test_particle_deposit.py b/yt/geometry/tests/test_particle_deposit.py index c8d1b72e87f..083d63102a6 100644 --- a/yt/geometry/tests/test_particle_deposit.py +++ b/yt/geometry/tests/test_particle_deposit.py @@ -1,7 +1,7 @@ from numpy.testing import assert_allclose, 
assert_array_less, assert_raises import yt -from yt.convenience import load +from yt.loaders import load from yt.testing import fake_random_ds, requires_file from yt.utilities.exceptions import YTBoundsDefinitionError diff --git a/yt/testing.py b/yt/testing.py index 13161fb94e2..c341e1a0253 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -14,8 +14,8 @@ from unyt.exceptions import UnitOperationError from yt.config import ytcfg -from yt.convenience import load from yt.funcs import iterable +from yt.loaders import load from yt.units.yt_array import YTArray, YTQuantity # we import this in a weird way from numpy.testing to avoid triggering @@ -201,7 +201,7 @@ def fake_random_ds( unit_system="cgs", bbox=None, ): - from yt.frontends.stream.api import load_uniform_grid + from yt.loaders import load_uniform_grid prng = RandomState(0x4D3D3D3) if not iterable(ndims): @@ -261,7 +261,7 @@ def fake_random_ds( def fake_amr_ds( fields=("Density",), geometry="cartesian", particles=0, length_unit=None ): - from yt.frontends.stream.api import load_amr_grids + from yt.loaders import load_amr_grids prng = RandomState(0x4D3D3D3) LE, RE = _geom_transforms[geometry] @@ -309,7 +309,7 @@ def fake_particle_ds( length_unit=1.0, data=None, ): - from yt.frontends.stream.api import load_particles + from yt.loaders import load_particles prng = RandomState(0x4D3D3D3) if not iterable(negative): @@ -337,11 +337,11 @@ def fake_particle_ds( def fake_tetrahedral_ds(): - from yt.frontends.stream.api import load_unstructured_mesh from yt.frontends.stream.sample_data.tetrahedral_mesh import ( _connectivity, _coordinates, ) + from yt.loaders import load_unstructured_mesh prng = RandomState(0x4D3D3D3) @@ -361,11 +361,11 @@ def fake_tetrahedral_ds(): def fake_hexahedral_ds(): - from yt.frontends.stream.api import load_unstructured_mesh from yt.frontends.stream.sample_data.hexahedral_mesh import ( _connectivity, _coordinates, ) + from yt.loaders import load_unstructured_mesh prng = RandomState(0x4D3D3D3) # the distance from the origin @@ -384,7 +384,7 @@ def fake_hexahedral_ds(): def small_fake_hexahedral_ds(): - from yt.frontends.stream.api import load_unstructured_mesh + from yt.loaders import load_unstructured_mesh _coordinates = np.array( [ @@ -431,7 +431,7 @@ def fake_vr_orientation_test_ds(N=96, scale=1): test datasets that have spatial different scales (e.g. 
data in CGS units) """ - from yt.frontends.stream.api import load_uniform_grid + from yt.loaders import load_uniform_grid xmin = ymin = zmin = -1.0 * scale xmax = ymax = zmax = 1.0 * scale @@ -614,7 +614,7 @@ def fake_octree_ds( partial_coverage=1, unit_system="cgs", ): - from yt.frontends.stream.api import load_octree + from yt.loaders import load_octree octree_mask = np.asarray( construct_octree_mask(prng=prng, refined=refined), dtype=np.uint8 diff --git a/yt/units/tests/test_magnetic_code_units.py b/yt/units/tests/test_magnetic_code_units.py index 52d3d605bce..7915c5069d7 100644 --- a/yt/units/tests/test_magnetic_code_units.py +++ b/yt/units/tests/test_magnetic_code_units.py @@ -1,6 +1,6 @@ import numpy as np -from yt.frontends.stream.api import load_uniform_grid +from yt.loaders import load_uniform_grid from yt.testing import assert_allclose diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index c3d5a37ed51..51d8664fbee 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -16,7 +16,6 @@ import numpy as np from yt.config import CURRENT_CONFIG_FILE, ytcfg -from yt.convenience import load from yt.extern.tqdm import tqdm from yt.funcs import ( download_file, @@ -29,6 +28,7 @@ mylog, update_hg_or_git, ) +from yt.loaders import load from yt.utilities.configure import set_config from yt.utilities.exceptions import ( YTCommandRequiresModule, diff --git a/yt/utilities/grid_data_format/tests/test_writer.py b/yt/utilities/grid_data_format/tests/test_writer.py index a6285059935..6f2eb9e9506 100644 --- a/yt/utilities/grid_data_format/tests/test_writer.py +++ b/yt/utilities/grid_data_format/tests/test_writer.py @@ -3,7 +3,7 @@ import tempfile from yt.frontends.gdf.data_structures import GDFDataset -from yt.mods import load +from yt.loaders import load from yt.testing import assert_equal, fake_random_ds, requires_module from yt.utilities.grid_data_format.writer import write_to_gdf from yt.utilities.on_demand_imports import _h5py as h5 diff --git a/yt/utilities/load_sample.py b/yt/utilities/load_sample.py index 90b8b7f3cde..4458a21a6be 100644 --- a/yt/utilities/load_sample.py +++ b/yt/utilities/load_sample.py @@ -8,8 +8,8 @@ import os import yt.utilities.sample_data as sd -from yt.convenience import load from yt.funcs import mylog +from yt.loaders import load from yt.utilities.on_demand_imports import _pooch as pch diff --git a/yt/utilities/metadata.py b/yt/utilities/metadata.py index e210f8cd915..2863654c737 100644 --- a/yt/utilities/metadata.py +++ b/yt/utilities/metadata.py @@ -1,4 +1,4 @@ -from yt.convenience import load +from yt.loaders import load DEFAULT_ATTRS = ( "dimensionality", diff --git a/yt/utilities/parallel_tools/io_runner.py b/yt/utilities/parallel_tools/io_runner.py index 97214e8a7a1..a98f6b390bf 100644 --- a/yt/utilities/parallel_tools/io_runner.py +++ b/yt/utilities/parallel_tools/io_runner.py @@ -149,7 +149,7 @@ def remote_io(ds, wg, pool): def io_nodes(fn, n_io, n_work, func, *args, **kwargs): - from yt.mods import load + from yt.loaders import load pool, wg = ProcessorPool.from_sizes([(n_io, "io"), (n_work, "work")]) rv = None diff --git a/yt/utilities/tests/test_particle_generator.py b/yt/utilities/tests/test_particle_generator.py index 36b1d8c0dc1..9a4c73b85a0 100644 --- a/yt/utilities/tests/test_particle_generator.py +++ b/yt/utilities/tests/test_particle_generator.py @@ -2,7 +2,7 @@ import yt.utilities.flagging_methods as fm import yt.utilities.initial_conditions as ic -from yt.frontends.stream.api import load_uniform_grid, 
refine_amr +from yt.loaders import load_uniform_grid, refine_amr from yt.testing import assert_almost_equal, assert_equal from yt.units.yt_array import uconcatenate from yt.utilities.particle_generator import ( diff --git a/yt/visualization/fixed_resolution.py b/yt/visualization/fixed_resolution.py index 256e4b3df3e..3b0522e03fa 100644 --- a/yt/visualization/fixed_resolution.py +++ b/yt/visualization/fixed_resolution.py @@ -4,7 +4,6 @@ import numpy as np from yt.data_objects.image_array import ImageArray -from yt.frontends.stream.api import load_uniform_grid from yt.frontends.ytdata.utilities import save_as_dataset from yt.funcs import ( deprecate, @@ -13,6 +12,7 @@ issue_deprecation_warning, mylog, ) +from yt.loaders import load_uniform_grid from yt.utilities.lib.api import add_points_to_greyscale_image from yt.utilities.lib.pixelization_routines import pixelize_cylinder from yt.utilities.on_demand_imports import _h5py as h5py diff --git a/yt/visualization/tests/test_callbacks.py b/yt/visualization/tests/test_callbacks.py index 2390edcbede..383539b2cf0 100644 --- a/yt/visualization/tests/test_callbacks.py +++ b/yt/visualization/tests/test_callbacks.py @@ -6,7 +6,7 @@ import yt.units as u from yt.config import ytcfg -from yt.convenience import load +from yt.loaders import load from yt.testing import ( assert_fname, fake_amr_ds, diff --git a/yt/visualization/tests/test_fits_image.py b/yt/visualization/tests/test_fits_image.py index a42429fd9cb..8fe6fb6941d 100644 --- a/yt/visualization/tests/test_fits_image.py +++ b/yt/visualization/tests/test_fits_image.py @@ -4,7 +4,7 @@ from numpy.testing import assert_allclose, assert_equal -from yt.convenience import load +from yt.loaders import load from yt.testing import fake_random_ds, requires_module from yt.utilities.on_demand_imports import _astropy from yt.visualization.fits_image import ( diff --git a/yt/visualization/tests/test_particle_plot.py b/yt/visualization/tests/test_particle_plot.py index ed78f790458..03a4a1e4f81 100644 --- a/yt/visualization/tests/test_particle_plot.py +++ b/yt/visualization/tests/test_particle_plot.py @@ -6,9 +6,9 @@ import mock import numpy as np -from yt.convenience import load from yt.data_objects.particle_filters import add_particle_filter from yt.data_objects.profiles import create_profile +from yt.loaders import load from yt.testing import ( assert_allclose, assert_array_almost_equal, diff --git a/yt/visualization/tests/test_plotwindow.py b/yt/visualization/tests/test_plotwindow.py index f75f89a763d..b0314f1bc04 100644 --- a/yt/visualization/tests/test_plotwindow.py +++ b/yt/visualization/tests/test_plotwindow.py @@ -11,7 +11,7 @@ import numpy as np from nose.tools import assert_true -from yt.frontends.stream.api import load_uniform_grid +from yt.loaders import load_uniform_grid from yt.testing import ( assert_array_almost_equal, assert_array_equal, From 9b59df958e3bbc7f02333e77d5bf29d46c125b6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 9 Aug 2020 19:35:51 +0200 Subject: [PATCH 489/653] separate definitions, loaders and datastructures in stream frontend --- yt/data_objects/tests/test_covering_grid.py | 2 +- .../tests/test_exclude_functions.py | 2 +- yt/data_objects/time_series.py | 1 - yt/frontends/stream/api.py | 11 +- yt/frontends/stream/data_structures.py | 1445 +---------------- yt/frontends/stream/definitions.py | 362 +++++ yt/frontends/stream/loaders.py | 1103 +++++++++++++ yt/frontends/stream/tests/test_outputs.py | 2 +- yt/loaders.py | 4 +- 
yt/utilities/tests/test_particle_generator.py | 3 +- 10 files changed, 1479 insertions(+), 1456 deletions(-) create mode 100644 yt/frontends/stream/loaders.py diff --git a/yt/data_objects/tests/test_covering_grid.py b/yt/data_objects/tests/test_covering_grid.py index 4e8489aab31..657386a4f38 100644 --- a/yt/data_objects/tests/test_covering_grid.py +++ b/yt/data_objects/tests/test_covering_grid.py @@ -1,7 +1,7 @@ import numpy as np from yt.fields.derived_field import ValidateParameter -from yt.frontends.stream.data_structures import load_particles +from yt.frontends.stream.loaders import load_particles from yt.loaders import load from yt.testing import ( assert_almost_equal, diff --git a/yt/data_objects/tests/test_exclude_functions.py b/yt/data_objects/tests/test_exclude_functions.py index 5ccb531cd6f..e2abc982242 100644 --- a/yt/data_objects/tests/test_exclude_functions.py +++ b/yt/data_objects/tests/test_exclude_functions.py @@ -1,6 +1,6 @@ import numpy as np -from yt.frontends.stream.data_structures import load_uniform_grid +from yt.frontends.stream.loaders import load_uniform_grid from yt.testing import assert_equal, fake_random_ds diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 55046145542..95a045c5877 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -11,7 +11,6 @@ from yt.data_objects.analyzer_objects import AnalysisTask, create_quantity_proxy from yt.data_objects.particle_trajectories import ParticleTrajectories from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog -from yt.loaders import load from yt.units.yt_array import YTArray, YTQuantity from yt.utilities.exceptions import YTException, YTOutputNotIdentified from yt.utilities.object_registries import ( diff --git a/yt/frontends/stream/api.py b/yt/frontends/stream/api.py index e029d9aad3e..14d10008df5 100644 --- a/yt/frontends/stream/api.py +++ b/yt/frontends/stream/api.py @@ -5,11 +5,11 @@ StreamHandler, StreamHierarchy, hexahedral_connectivity, - refine_amr, ) - -""" -from yt.loaders import ( +from .definitions import refine_amr +from .fields import StreamFieldInfo +from .io import IOHandlerStream +from .loaders import ( load_amr_grids, load_hexahedral_mesh, load_octree, @@ -17,6 +17,3 @@ load_uniform_grid, load_unstructured_mesh, ) -""" -from .fields import StreamFieldInfo -from .io import IOHandlerStream diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index ac71ef66a64..babda0af5b6 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -2,7 +2,6 @@ import time import uuid import weakref -from collections import defaultdict from itertools import chain, product, repeat from numbers import Number as numeric_type @@ -15,24 +14,14 @@ from yt.data_objects.static_output import Dataset, ParticleFile from yt.data_objects.unions import MeshUnion from yt.data_objects.unstructured_mesh import SemiStructuredMesh, UnstructuredMesh -from yt.frontends.exodus_ii.util import get_num_pseudo_dims from yt.frontends.sph.data_structures import SPHParticleIndex -from yt.funcs import ensure_list, issue_deprecation_warning, iterable +from yt.funcs import ensure_list from yt.geometry.geometry_handler import YTDataChunk -from yt.geometry.grid_container import GridTree, MatchPointsToGrids from yt.geometry.grid_geometry_handler import GridIndex from yt.geometry.oct_container import OctreeContainer from yt.geometry.oct_geometry_handler import OctreeIndex from 
yt.geometry.unstructured_mesh_handler import UnstructuredIndex -from yt.units.yt_array import YTQuantity, uconcatenate -from yt.utilities.decompose import decompose_array, get_psize -from yt.utilities.exceptions import ( - YTIllDefinedAMR, - YTInconsistentGridFieldShape, - YTInconsistentGridFieldShapeGridDims, - YTInconsistentParticleFieldShape, -) -from yt.utilities.flagging_methods import FlaggingGrid +from yt.units.yt_array import YTQuantity from yt.utilities.io_handler import io_registry from yt.utilities.lib.cykdtree import PyKDTree from yt.utilities.lib.misc_utilities import get_box_grids_level @@ -42,6 +31,7 @@ ) from yt.utilities.logger import ytLogger as mylog +from .definitions import process_data, set_particle_types from .fields import StreamFieldInfo @@ -374,749 +364,6 @@ def all_fields(self): return fields -def set_particle_types(data): - particle_types = {} - for key in data.keys(): - if key == "number_of_particles": - continue - if len(data[key].shape) == 1: - particle_types[key] = True - else: - particle_types[key] = False - return particle_types - - -def assign_particle_data(ds, pdata, bbox): - - """ - Assign particle data to the grids using MatchPointsToGrids. This - will overwrite any existing particle data, so be careful! - """ - - for ptype in ds.particle_types_raw: - check_fields = [(ptype, "particle_position_x"), (ptype, "particle_position")] - if all(f not in pdata for f in check_fields): - pdata_ftype = {} - for f in [k for k in sorted(pdata)]: - if not hasattr(pdata[f], "shape"): - continue - if f == "number_of_particles": - continue - mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f) - pdata_ftype[ptype, f] = pdata.pop(f) - pdata_ftype.update(pdata) - pdata = pdata_ftype - - # Note: what we need to do here is a bit tricky. Because occasionally this - # gets called before we property handle the field detection, we cannot use - # any information about the index. Fortunately for us, we can generate - # most of the GridTree utilizing information we already have from the - # stream handler. - - if len(ds.stream_handler.fields) > 1: - pdata.pop("number_of_particles", None) - num_grids = len(ds.stream_handler.fields) - parent_ids = ds.stream_handler.parent_ids - num_children = np.zeros(num_grids, dtype="int64") - # We're going to do this the slow way - mask = np.empty(num_grids, dtype="bool") - for i in range(num_grids): - np.equal(parent_ids, i, mask) - num_children[i] = mask.sum() - levels = ds.stream_handler.levels.astype("int64").ravel() - grid_tree = GridTree( - num_grids, - ds.stream_handler.left_edges, - ds.stream_handler.right_edges, - ds.stream_handler.dimensions, - ds.stream_handler.parent_ids, - levels, - num_children, - ) - - grid_pdata = [] - for _ in range(num_grids): - grid = {"number_of_particles": 0} - grid_pdata.append(grid) - - for ptype in ds.particle_types_raw: - if (ptype, "particle_position_x") in pdata: - x, y, z = (pdata[ptype, f"particle_position_{ax}"] for ax in "xyz") - elif (ptype, "particle_position") in pdata: - x, y, z = pdata[ptype, "particle_position"].T - else: - raise KeyError( - "Cannot decompose particle data without position fields!" 
- ) - pts = MatchPointsToGrids(grid_tree, len(x), x, y, z) - particle_grid_inds = pts.find_points_in_tree() - (assigned_particles,) = (particle_grid_inds >= 0).nonzero() - num_particles = particle_grid_inds.size - num_unassigned = num_particles - assigned_particles.size - if num_unassigned > 0: - eps = np.finfo(x.dtype).eps - s = np.array( - [ - [x.min() - eps, x.max() + eps], - [y.min() - eps, y.max() + eps], - [z.min() - eps, z.max() + eps], - ] - ) - sug_bbox = [ - [min(bbox[0, 0], s[0, 0]), max(bbox[0, 1], s[0, 1])], - [min(bbox[1, 0], s[1, 0]), max(bbox[1, 1], s[1, 1])], - [min(bbox[2, 0], s[2, 0]), max(bbox[2, 1], s[2, 1])], - ] - mylog.warning( - "Discarding %s particles (out of %s) that are outside " - "bounding box. Set bbox=%s to avoid this in the future.", - num_unassigned, - num_particles, - sug_bbox, - ) - particle_grid_inds = particle_grid_inds[assigned_particles] - x = x[assigned_particles] - y = y[assigned_particles] - z = z[assigned_particles] - idxs = np.argsort(particle_grid_inds) - particle_grid_count = np.bincount( - particle_grid_inds.astype("intp"), minlength=num_grids - ) - particle_indices = np.zeros(num_grids + 1, dtype="int64") - if num_grids > 1: - np.add.accumulate( - particle_grid_count.squeeze(), out=particle_indices[1:] - ) - else: - particle_indices[1] = particle_grid_count.squeeze() - for i, pcount in enumerate(particle_grid_count): - grid_pdata[i]["number_of_particles"] += pcount - start = particle_indices[i] - end = particle_indices[i + 1] - for key in pdata.keys(): - if key[0] == ptype: - grid_pdata[i][key] = pdata[key][idxs][start:end] - - else: - grid_pdata = [pdata] - - for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)): - ds.stream_handler.fields[gi].update(pd) - ds.stream_handler.particle_types.update(set_particle_types(pd)) - npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0) - ds.stream_handler.particle_count[gi] = npart - - -def process_data(data, grid_dims=None): - new_data, field_units = {}, {} - for field, val in data.items(): - # val is a data array - if isinstance(val, np.ndarray): - # val is a YTArray - if hasattr(val, "units"): - field_units[field] = val.units - new_data[field] = val.copy().d - # val is a numpy array - else: - field_units[field] = "" - new_data[field] = val.copy() - - # val is a tuple of (data, units) - elif isinstance(val, tuple) and len(val) == 2: - try: - assert isinstance(field, (str, tuple)), "Field name is not a string!" - assert isinstance(val[0], np.ndarray), "Field data is not an ndarray!" - assert isinstance(val[1], str), "Unit specification is not a string!" - field_units[field] = val[1] - new_data[field] = val[0] - except AssertionError as e: - raise RuntimeError("The data dict appears to be invalid.\n" + str(e)) - - # val is a list of data to be turned into an array - elif iterable(val): - field_units[field] = "" - new_data[field] = np.asarray(val) - - else: - raise RuntimeError( - "The data dict appears to be invalid. " - "The data dictionary must map from field " - "names to (numpy array, unit spec) tuples. 
" - ) - - data = new_data - - # At this point, we have arrays for all our fields - new_data = {} - for field in data: - n_shape = len(data[field].shape) - if isinstance(field, tuple): - new_field = field - elif n_shape in (1, 2): - new_field = ("io", field) - elif n_shape == 3: - new_field = ("stream", field) - else: - raise RuntimeError - new_data[new_field] = data[field] - field_units[new_field] = field_units.pop(field) - known_fields = ( - StreamFieldInfo.known_particle_fields + StreamFieldInfo.known_other_fields - ) - # We do not want to override any of the known ones, if it's not - # overridden here. - if ( - any(f[0] == new_field[1] for f in known_fields) - and field_units[new_field] == "" - ): - field_units.pop(new_field) - data = new_data - # Sanity checking that all fields have the same dimensions. - g_shapes = [] - p_shapes = defaultdict(list) - for field in data: - f_shape = data[field].shape - n_shape = len(f_shape) - if n_shape in (1, 2): - p_shapes[field[0]].append((field[1], f_shape[0])) - elif n_shape == 3: - g_shapes.append((field, f_shape)) - if len(g_shapes) > 0: - g_s = np.array([s[1] for s in g_shapes]) - if not np.all(g_s == g_s[0]): - raise YTInconsistentGridFieldShape(g_shapes) - if grid_dims is not None: - if not np.all(g_s == grid_dims): - raise YTInconsistentGridFieldShapeGridDims(g_shapes, grid_dims) - if len(p_shapes) > 0: - for ptype, p_shape in p_shapes.items(): - p_s = np.array([s[1] for s in p_shape]) - if not np.all(p_s == p_s[0]): - raise YTInconsistentParticleFieldShape(ptype, p_shape) - # Now that we know the particle fields are consistent, determine the number - # of particles. - if len(p_shapes) > 0: - number_of_particles = np.sum([s[0][1] for s in p_shapes.values()]) - else: - number_of_particles = 0 - return field_units, data, number_of_particles - - -def load_uniform_grid( - data, - domain_dimensions, - length_unit=None, - bbox=None, - nprocs=1, - sim_time=0.0, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - unit_system="cgs", -): - r"""Load a uniform grid of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - - This should allow a uniform grid of data to be loaded directly into yt and - analyzed as would any others. This comes with several caveats: - - * Units will be incorrect unless the unit system is explicitly - specified. - * Some functions may behave oddly, and parallelism will be - disappointing or non-existent in most cases. - * Particles may be difficult to integrate. - - Particle fields are detected as one-dimensional fields. - - Parameters - ---------- - data : dict - This is a dict of numpy arrays or (numpy array, unit spec) tuples. - The keys are the field names. - domain_dimensions : array_like - This is the domain dimensions of the grid - length_unit : string - Unit to use for lengths. Defaults to unitless. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units specified by length_unit. - Defaults to a cubic unit-length domain. - nprocs: integer, optional - If greater than 1, will create this number of subarrays out of data - sim_time : float, optional - The simulation time in seconds - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. 
- periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). - - Examples - -------- - - >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]]) - >>> arr = np.random.random((128, 128, 128)) - >>> data = dict(density=arr) - >>> ds = load_uniform_grid(data, arr.shape, length_unit='cm', - ... bbox=bbox, nprocs=12) - >>> dd = ds.all_data() - >>> dd['density'] - YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916, - 0.97506269, 0.03047113]) g/cm**3 - """ - - domain_dimensions = np.array(domain_dimensions) - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - # If someone included this throw it away--old API - if "number_of_particles" in data: - issue_deprecation_warning( - "It is no longer necessary to include " - "the number of particles in the data " - "dict. The number of particles is " - "determined from the sizes of the " - "particle fields." - ) - data.pop("number_of_particles") - # First we fix our field names, apply units to data - # and check for consistency of field shapes - field_units, data, number_of_particles = process_data( - data, grid_dims=tuple(domain_dimensions) - ) - - sfh = StreamDictFieldHandler() - - if number_of_particles > 0: - particle_types = set_particle_types(data) - # Used much further below. 
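# A hedged sketch of the particle handling in this branch: the docstring above
# notes that particle fields are detected as one-dimensional arrays, so a single
# data dict can mix gridded and particle data. The field names, shapes, and
# units below are illustrative assumptions, not values from this patch.
import numpy as np
from yt.loaders import load_uniform_grid

grid_arr = np.random.random((16, 16, 16))            # 3D array -> gridded field
npart = 128                                           # assumed particle count
data = {
    "density": (grid_arr, "g/cm**3"),
    # 1D arrays become ("io", ...) particle fields and are routed through
    # assign_particle_data once the dataset has been constructed.
    "particle_position_x": np.random.random(npart),
    "particle_position_y": np.random.random(npart),
    "particle_position_z": np.random.random(npart),
    "particle_mass": (np.ones(npart), "g"),
}
ds = load_uniform_grid(data, grid_arr.shape, length_unit="cm")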
- pdata = {"number_of_particles": number_of_particles} - for key in list(data.keys()): - if len(data[key].shape) == 1 or key[0] == "io": - if not isinstance(key, tuple): - field = ("io", key) - mylog.debug("Reassigning '%s' to '%s'", key, field) - else: - field = key - sfh._additional_fields += (field,) - pdata[field] = data.pop(key) - else: - particle_types = {} - - if nprocs > 1: - temp = {} - new_data = {} - for key in data.keys(): - psize = get_psize(np.array(data[key].shape), nprocs) - grid_left_edges, grid_right_edges, shapes, slices = decompose_array( - data[key].shape, psize, bbox - ) - grid_dimensions = np.array([shape for shape in shapes], dtype="int32") - temp[key] = [data[key][slice] for slice in slices] - for gid in range(nprocs): - new_data[gid] = {} - for key in temp.keys(): - new_data[gid].update({key: temp[key][gid]}) - sfh.update(new_data) - del new_data, temp - else: - sfh.update({0: data}) - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # particle count - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "UniformGridData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - if np.all(domain_dimensions[1:] == 1): - dimensionality = 1 - elif domain_dimensions[2] == 1: - dimensionality = 2 - else: - dimensionality = 3 - handler.dimensionality = dimensionality - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) - - # Now figure out where the particles go - if number_of_particles > 0: - # This will update the stream handler too - assign_particle_data(sds, pdata, bbox) - - return sds - - -def load_amr_grids( - grid_data, - domain_dimensions, - bbox=None, - sim_time=0.0, - length_unit=None, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - refine_by=2, - unit_system="cgs", -): - r"""Load a set of grids of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - This should allow a sequence of grids of varying resolution of data to be - loaded directly into yt and analyzed as would any others. This comes with - several caveats: - - * Units will be incorrect unless the unit system is explicitly specified. - * Some functions may behave oddly, and parallelism will be - disappointing or non-existent in most cases. - * Particles may be difficult to integrate. - * No consistency checks are performed on the index - - Parameters - ---------- - - grid_data : list of dicts - This is a list of dicts. Each dict must have entries "left_edge", - "right_edge", "dimensions", "level", and then any remaining entries are - assumed to be fields. 
Field entries must map to an NDArray. The grid_data - may also include a particle count. If no particle count is supplied, the - dataset is understood to contain no particles. The grid_data will be - modified in place and can't be assumed to be static. - domain_dimensions : array_like - This is the domain dimensions of the grid - length_unit : string or float - Unit to use for lengths. Defaults to unitless. If set to be a string, the bbox - dimensions are assumed to be in the corresponding units. If set to a float, the - value is a assumed to be the conversion from bbox dimensions to centimeters. - mass_unit : string or float - Unit to use for masses. Defaults to unitless. - time_unit : string or float - Unit to use for times. Defaults to unitless. - velocity_unit : string or float - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string or float - Unit to use for magnetic fields. Defaults to unitless. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units specified by length_unit. - Defaults to a cubic unit-length domain. - sim_time : float, optional - The simulation time in seconds - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). - refine_by : integer or list/array of integers. - Specifies the refinement ratio between levels. Defaults to 2. This - can be an array, in which case it specifies for each dimension. For - instance, this can be used to say that some datasets have refinement of - 1 in one dimension, indicating that they span the full range in that - dimension. - - Examples - -------- - - >>> grid_data = [ - ... dict(left_edge = [0.0, 0.0, 0.0], - ... right_edge = [1.0, 1.0, 1.], - ... level = 0, - ... dimensions = [32, 32, 32], - ... number_of_particles = 0), - ... dict(left_edge = [0.25, 0.25, 0.25], - ... right_edge = [0.75, 0.75, 0.75], - ... level = 1, - ... dimensions = [32, 32, 32], - ... number_of_particles = 0) - ... ] - ... - >>> for g in grid_data: - ... g["density"] = (np.random.random(g["dimensions"])*2**g["level"], "g/cm**3") - ... 
- >>> ds = load_amr_grids(grid_data, [32, 32, 32], length_unit=1.0) - """ - - domain_dimensions = np.array(domain_dimensions) - ngrids = len(grid_data) - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros((ngrids, 1), dtype="int32") - grid_left_edges = np.zeros((ngrids, 3), dtype="float64") - grid_right_edges = np.zeros((ngrids, 3), dtype="float64") - grid_dimensions = np.zeros((ngrids, 3), dtype="int32") - number_of_particles = np.zeros((ngrids, 1), dtype="int64") - parent_ids = np.zeros(ngrids, dtype="int64") - 1 - sfh = StreamDictFieldHandler() - for i, g in enumerate(grid_data): - grid_left_edges[i, :] = g.pop("left_edge") - grid_right_edges[i, :] = g.pop("right_edge") - grid_dimensions[i, :] = g.pop("dimensions") - grid_levels[i, :] = g.pop("level") - # If someone included this throw it away--old API - if "number_of_particles" in g: - issue_deprecation_warning( - "It is no longer necessary to include " - "the number of particles in the data " - "dict. The number of particles is " - "determined from the sizes of the " - "particle fields." - ) - g.pop("number_of_particles") - field_units, data, n_particles = process_data( - g, grid_dims=tuple(grid_dimensions[i, :]) - ) - number_of_particles[i, :] = n_particles - sfh[i] = data - - # We now reconstruct our parent ids, so that our particle assignment can - # proceed. - mask = np.empty(ngrids, dtype="int32") - for gi in range(ngrids): - get_box_grids_level( - grid_left_edges[gi, :], - grid_right_edges[gi, :], - grid_levels[gi] + 1, - grid_left_edges, - grid_right_edges, - grid_levels, - mask, - ) - ids = np.where(mask.astype("bool")) - for ci in ids: - parent_ids[ci] = gi - - # Check if the grid structure is properly aligned (bug #1295) - for lvl in range(grid_levels.min() + 1, grid_levels.max() + 1): - idx = grid_levels.flatten() == lvl - dims = domain_dimensions * refine_by ** (lvl - 1) - for iax, ax in enumerate("xyz"): - cell_edges = np.linspace( - domain_left_edge[iax], domain_right_edge[iax], dims[iax], endpoint=False - ) - if set(grid_left_edges[idx, iax]) - set(cell_edges): - raise YTIllDefinedAMR(lvl, ax) - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - particle_types = {} - - for grid in sfh.values(): - particle_types.update(set_particle_types(grid)) - - handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - parent_ids, - number_of_particles, - np.zeros(ngrids).reshape((ngrids, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "AMRGridData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = refine_by - if np.all(domain_dimensions[1:] == 1): - dimensionality = 1 - elif domain_dimensions[2] == 1: - dimensionality = 2 - else: - dimensionality = 3 - handler.dimensionality = dimensionality - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) - return sds - - -def 
refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, callback=None): - r"""Given a base dataset, repeatedly apply refinement criteria and - fluid operators until a maximum level is reached. - - Parameters - ---------- - base_ds : ~yt.data_objects.static_output.Dataset - This is any static output. It can also be a stream static output, for - instance as returned by load_uniform_data. - refinement_critera : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod` - These criteria will be applied in sequence to identify cells that need - to be refined. - fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator` - These fluid operators will be applied in sequence to all resulting - grids. - max_level : int - The maximum level to which the data will be refined - callback : function, optional - A function that will be called at the beginning of each refinement - cycle, with the current dataset. - - Examples - -------- - >>> domain_dims = (32, 32, 32) - >>> data = np.zeros(domain_dims) + 0.25 - >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})] - >>> rc = [fm.flagging_method_registry["overdensity"](8.0)] - >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0) - >>> ds = refine_amr(ug, rc, fo, 5) - """ - - # If we have particle data, set it aside for now - - number_of_particles = np.sum( - [grid.NumberOfParticles for grid in base_ds.index.grids] - ) - - if number_of_particles > 0: - pdata = {} - for field in base_ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = base_ds._get_field_info(*field) - if ( - fi.sampling_type == "particle" - and field[0] in base_ds.particle_types_raw - ): - pdata[field] = uconcatenate( - [grid[field] for grid in base_ds.index.grids] - ) - pdata["number_of_particles"] = number_of_particles - - last_gc = base_ds.index.num_grids - cur_gc = -1 - ds = base_ds - bbox = np.array( - [(ds.domain_left_edge[i], ds.domain_right_edge[i]) for i in range(3)] - ) - while ds.index.max_level < max_level and last_gc != cur_gc: - mylog.info("Refining another level. 
Current max level: %s", ds.index.max_level) - last_gc = ds.index.grids.size - for m in fluid_operators: - m.apply(ds) - if callback is not None: - callback(ds) - grid_data = [] - for g in ds.index.grids: - gd = dict( - left_edge=g.LeftEdge, - right_edge=g.RightEdge, - level=g.Level, - dimensions=g.ActiveDimensions, - ) - for field in ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = ds._get_field_info(*field) - if not fi.sampling_type == "particle": - gd[field] = g[field] - grid_data.append(gd) - if g.Level < ds.index.max_level: - continue - fg = FlaggingGrid(g, refinement_criteria) - nsg = fg.find_subgrids() - for sg in nsg: - LE = sg.left_index * g.dds + ds.domain_left_edge - dims = sg.dimensions * ds.refine_by - grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims) - gd = dict( - left_edge=LE, - right_edge=grid.right_edge, - level=g.Level + 1, - dimensions=dims, - ) - for field in ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = ds._get_field_info(*field) - if not fi.sampling_type == "particle": - gd[field] = grid[field] - grid_data.append(gd) - - ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox) - - ds.particle_types_raw = base_ds.particle_types_raw - ds.particle_types = ds.particle_types_raw - - # Now figure out where the particles go - if number_of_particles > 0: - # This will update the stream handler too - assign_particle_data(ds, pdata, bbox) - - cur_gc = ds.index.num_grids - - return ds - - class StreamParticleIndex(SPHParticleIndex): def __init__(self, ds, dataset_type=None): self.stream_handler = ds.stream_handler @@ -1286,164 +533,6 @@ def exists(fname): self.num_neighbors = n_neighbors -def load_particles( - data, - length_unit=None, - bbox=None, - sim_time=None, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - unit_system="cgs", - data_source=None, -): - r"""Load a set of particles into yt as a - :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`. - - This will allow a collection of particle data to be loaded directly into - yt and analyzed as would any others. This comes with several caveats: - - * There must be sufficient space in memory to contain all the particle - data. - * Parallelism will be disappointing or non-existent in most cases. - * Fluid fields are not supported. - - Note: in order for the dataset to take advantage of SPH functionality, - the following two fields must be provided: - * ('io', 'density') - * ('io', 'smoothing_length') - - Parameters - ---------- - data : dict - This is a dict of numpy arrays or (numpy array, unit name) tuples, - where the keys are the field names. Particles positions must be named - "particle_position_x", "particle_position_y", and "particle_position_z". 
- length_unit : float - Conversion factor from simulation length units to centimeters - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of the length_unit - sim_time : float, optional - The simulation time in seconds - mass_unit : float - Conversion factor from simulation mass units to grams - time_unit : float - Conversion factor from simulation time units to seconds - velocity_unit : float - Conversion factor from simulation velocity units to cm/s - magnetic_unit : float - Conversion factor from simulation magnetic units to gauss - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - data_source : YTSelectionContainer, optional - If set, parameters like `bbox`, `sim_time`, and code units are derived - from it. - - Examples - -------- - - >>> pos = [np.random.random(128*128*128) for i in range(3)] - >>> data = dict(particle_position_x = pos[0], - ... particle_position_y = pos[1], - ... particle_position_z = pos[2]) - >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]]) - >>> ds = load_particles(data, 3.08e24, bbox=bbox) - - """ - - domain_dimensions = np.ones(3, "int32") - nprocs = 1 - - # Parse bounding box - if data_source is not None: - le, re = data_source.get_bbox() - le = le.to_value("code_length") - re = re.to_value("code_length") - bbox = list(zip(le, re)) - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - else: - bbox = np.array(bbox) - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - # Parse simulation time - if data_source is not None: - sim_time = data_source.ds.current_time - if sim_time is None: - sim_time = 0.0 - else: - sim_time = float(sim_time) - - # Parse units - def parse_unit(unit, dimension): - if unit is None: - unit = "code_" + dimension - if data_source is not None: - unit = getattr(data_source.ds, dimension + "_unit", unit) - return unit - - length_unit = parse_unit(length_unit, "length") - mass_unit = parse_unit(mass_unit, "mass") - time_unit = parse_unit(time_unit, "time") - velocity_unit = parse_unit(velocity_unit, "velocity") - magnetic_unit = parse_unit(magnetic_unit, "magnetic") - - # Preprocess data - field_units, data, _ = process_data(data) - sfh = StreamDictFieldHandler() - - pdata = {} - for key in data.keys(): - if not isinstance(key, tuple): - field = ("io", key) - mylog.debug("Reassigning '%s' to '%s'", key, field) - else: - field = key - pdata[field] = data[key] - sfh._additional_fields += (field,) - data = pdata # Drop reference count - particle_types = set_particle_types(data) - sfh.update({"stream_file": data}) - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - # I'm not sure we need any of this. 
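# A hedged sketch of a fuller call than the docstring example above: providing
# ("io", "density") and ("io", "smoothing_length"), as noted in that docstring,
# lets the dataset use the SPH machinery. The particle count, field values, and
# length_unit here are illustrative assumptions.
import numpy as np
from yt.frontends.stream.loaders import load_particles

n = 5000                                              # assumed particle count
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
data = {
    "particle_position_x": np.random.random(n),
    "particle_position_y": np.random.random(n),
    "particle_position_z": np.random.random(n),
    "particle_mass": np.ones(n),
    # The two fields below are what enable the SPH functionality:
    ("io", "density"): np.ones(n),
    ("io", "smoothing_length"): np.full(n, 0.05),
}
ds = load_particles(data, length_unit=3.08e24, bbox=bbox)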
- handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "ParticleData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = 3 - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system) - - return sds - - _cis = np.fromiter( chain.from_iterable(product([0, 1], [0, 1], [0, 1])), dtype=np.int64, count=8 * 3 ) @@ -1550,141 +639,6 @@ class StreamHexahedralDataset(StreamDataset): _dataset_type = "stream_hexahedral" -def load_hexahedral_mesh( - data, - connectivity, - coordinates, - length_unit=None, - bbox=None, - sim_time=0.0, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - unit_system="cgs", -): - r"""Load a hexahedral mesh of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - - This should allow a semistructured grid of data to be loaded directly into - yt and analyzed as would any others. This comes with several caveats: - - * Units will be incorrect unless the data has already been converted to - cgs. - * Some functions may behave oddly, and parallelism will be - disappointing or non-existent in most cases. - * Particles may be difficult to integrate. - - Particle fields are detected as one-dimensional fields. The number of particles - is set by the "number_of_particles" key in data. - - Parameters - ---------- - data : dict - This is a dict of numpy arrays, where the keys are the field names. - There must only be one. Note that the data in the numpy arrays should - define the cell-averaged value for of the quantity in in the hexahedral - cell. - connectivity : array_like - This should be of size (N,8) where N is the number of zones. - coordinates : array_like - This should be of size (M,3) where M is the number of vertices - indicated in the connectivity matrix. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of the length unit. - sim_time : float, optional - The simulation time in seconds - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). 
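# The docstring above carries no usage example; as a hedged sketch (the vertex
# grids, field values, and variable names are illustrative assumptions), a
# semistructured mesh can be assembled with the hexahedral_connectivity helper:
import numpy as np
from yt.frontends.stream.data_structures import hexahedral_connectivity
from yt.frontends.stream.loaders import load_hexahedral_mesh

xgrid = np.linspace(-1.0, 1.0, 5)
ygrid = np.linspace(-1.0, 1.0, 3)
zgrid = np.linspace(-1.0, 1.0, 4)
coords, conn = hexahedral_connectivity(xgrid, ygrid, zgrid)
bbox = np.array([[-1.0, 1.0], [-1.0, 1.0], [-1.0, 1.0]])
# One cell-averaged value per hexahedral zone, so the array size must equal
# conn.shape[0] (here (5 - 1) * (3 - 1) * (4 - 1) = 24 zones).
data = {"density": np.random.random((xgrid.size - 1, ygrid.size - 1, zgrid.size - 1))}
ds = load_hexahedral_mesh(data, conn, coords, length_unit="cm", bbox=bbox)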
- - """ - - domain_dimensions = np.ones(3, "int32") * 2 - nprocs = 1 - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - field_units, data, _ = process_data(data) - sfh = StreamDictFieldHandler() - - particle_types = set_particle_types(data) - - sfh.update({"connectivity": connectivity, "coordinates": coordinates, 0: data}) - # Simple check for axis length correctness - if len(data) > 0: - fn = list(sorted(data))[0] - array_values = data[fn] - if array_values.size != connectivity.shape[0]: - mylog.error( - "Dimensions of array must be one fewer than the coordinate set." - ) - raise RuntimeError - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - # I'm not sure we need any of this. - handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "HexahedralMeshData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = 3 - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system) - - return sds - - class StreamOctreeSubset(OctreeSubset): domain_id = 1 _domain_offset = 1 @@ -1796,152 +750,6 @@ class StreamOctreeDataset(StreamDataset): _dataset_type = "stream_octree" -def load_octree( - octree_mask, - data, - bbox=None, - sim_time=0.0, - length_unit=None, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - over_refine_factor=1, - partial_coverage=1, - unit_system="cgs", -): - r"""Load an octree mask into yt. - - Octrees can be saved out by calling save_octree on an OctreeContainer. - This enables them to be loaded back in. - - This will initialize an Octree of data. Note that fluid fields will not - work yet, or possibly ever. - - Parameters - ---------- - octree_mask : np.ndarray[uint8_t] - This is a depth-first refinement mask for an Octree. It should be - of size n_octs * 8 (but see note about the root oct below), where - each item is 1 for an oct-cell being refined and 0 for it not being - refined. For over_refine_factors != 1, the children count will - still be 8, so there will stil be n_octs * 8 entries. Note that if - the root oct is not refined, there will be only one entry - for the root, so the size of the mask will be (n_octs - 1)*8 + 1. - data : dict - A dictionary of 1D arrays. Note that these must of the size of the - number of "False" values in the ``octree_mask``. 
- bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of length - sim_time : float, optional - The simulation time in seconds - length_unit : string - Unit to use for lengths. Defaults to unitless. - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - partial_coverage : boolean - Whether or not an oct can be refined cell-by-cell, or whether all - 8 get refined. - - Example - ------- - - >>> import yt - >>> import numpy as np - >>> oct_mask = [8, 0, 0, 0, 0, 8, 0, 8, - ... 0, 0, 0, 0, 0, 0, 0, 0, - ... 8, 0, 0, 0, 0, 0, 0, 0, - ... 0] - >>> - >>> octree_mask = np.array(oct_mask, dtype=np.uint8) - >>> quantities = {} - >>> quantities['gas', 'density'] = np.random.random((22, 1), dtype='f8') - >>> bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]]) - >>> - >>> ds = yt.load_octree(octree_mask=octree_mask, - ... data=quantities, - ... bbox=bbox, - ... over_refine_factor=0, - ... partial_coverage=0) - - """ - - if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8: - raise TypeError("octree_mask should be a Numpy array with type uint8") - - nz = 1 << (over_refine_factor) - domain_dimensions = np.array([nz, nz, nz]) - nprocs = 1 - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - field_units, data, _ = process_data(data) - sfh = StreamDictFieldHandler() - - particle_types = set_particle_types(data) - - sfh.update({0: data}) - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - # I'm not sure we need any of this. 
- handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "OctreeData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = 3 - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamOctreeDataset(handler, unit_system=unit_system) - sds.octree_mask = octree_mask - sds.partial_coverage = partial_coverage - sds.over_refine_factor = over_refine_factor - - return sds - - class StreamUnstructuredMesh(UnstructuredMesh): _index_offset = 0 @@ -1983,250 +791,3 @@ class StreamUnstructuredMeshDataset(StreamDataset): def _find_particle_types(self): pass - - -def load_unstructured_mesh( - connectivity, - coordinates, - node_data=None, - elem_data=None, - length_unit=None, - bbox=None, - sim_time=0.0, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(False, False, False), - geometry="cartesian", - unit_system="cgs", -): - r"""Load an unstructured mesh of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - - This should allow an unstructured mesh data to be loaded directly into - yt and analyzed as would any others. Not all functionality for - visualization will be present, and some analysis functions may not yet have - been implemented. - - Particle fields are detected as one-dimensional fields. The number of - particles is set by the "number_of_particles" key in data. - - In the parameter descriptions below, a "vertex" is a 3D point in space, an - "element" is a single polyhedron whose location is defined by a set of - vertices, and a "mesh" is a set of polyhedral elements, each with the same - number of vertices. - - Parameters - ---------- - - connectivity : list of array_like or array_like - This should either be a single 2D array or list of 2D arrays. If this - is a list, each element in the list corresponds to the connectivity - information for a distinct mesh. Each array can have different - connectivity length and should be of shape (N,M) where N is the number - of elements and M is the number of vertices per element. - coordinates : array_like - The 3D coordinates of mesh vertices. This should be of size (L, D) where - L is the number of vertices and D is the number of coordinates per vertex - (the spatial dimensions of the dataset). Currently this must be either 2 or 3. - When loading more than one mesh, the data for each mesh should be concatenated - into a single coordinates array. - node_data : dict or list of dicts - For a single mesh, a dict mapping field names to 2D numpy arrays, - representing data defined at element vertices. For multiple meshes, - this must be a list of dicts. Note that these are not the values as a - function of the coordinates, but of the connectivity. Their shape - should be the same as the connectivity. This means that if the data is - in the shape of the coordinates, you may need to reshape them using the - `connectivity` array as an index. 
- elem_data : dict or list of dicts - For a single mesh, a dict mapping field names to 1D numpy arrays, where - each array has a length equal to the number of elements. The data - must be defined at the center of each mesh element and there must be - only one data value for each element. For multiple meshes, this must be - a list of dicts, with one dict for each mesh. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of the length unit. - sim_time : float, optional - The simulation time in seconds - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). - - Examples - -------- - - Load a simple mesh consisting of two tets. - - >>> # Coordinates for vertices of two tetrahedra - >>> coordinates = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5], - ... [0.5, 1, 0.5], [0.5, 0.5, 0.0], - ... [0.5, 0.5, 1.0]]) - >>> # The indices in the coordinates array of mesh vertices. - >>> # This mesh has two elements. - >>> connectivity = np.array([[0, 1, 2, 4], [0, 1, 2, 3]]) - >>> - >>> # Field data defined at the centers of the two mesh elements. - >>> elem_data = { - ... ('connect1', 'elem_field'): np.array([1, 2]) - ... } - >>> - >>> # Field data defined at node vertices - >>> node_data = { - ... ('connect1', 'node_field'): np.array([[0.0, 1.0, 2.0, 4.0], - ... [0.0, 1.0, 2.0, 3.0]]) - ... } - >>> - >>> ds = yt.load_unstructured_mesh(connectivity, coordinates, - ... elem_data=elem_data, - ... 
node_data=node_data) - """ - - dimensionality = coordinates.shape[1] - domain_dimensions = np.ones(3, "int32") * 2 - nprocs = 1 - - if elem_data is None and node_data is None: - raise RuntimeError("No data supplied in load_unstructured_mesh.") - - if isinstance(connectivity, list): - num_meshes = len(connectivity) - else: - num_meshes = 1 - connectivity = ensure_list(connectivity) - - if elem_data is None: - elem_data = [{} for i in range(num_meshes)] - elem_data = ensure_list(elem_data) - - if node_data is None: - node_data = [{} for i in range(num_meshes)] - node_data = ensure_list(node_data) - - data = [{} for i in range(num_meshes)] - for elem_dict, data_dict in zip(elem_data, data): - for field, values in elem_dict.items(): - data_dict[field] = values - for node_dict, data_dict in zip(node_data, data): - for field, values in node_dict.items(): - data_dict[field] = values - data = ensure_list(data) - - if bbox is None: - bbox = [ - [ - coordinates[:, i].min() - 0.1 * abs(coordinates[:, i].min()), - coordinates[:, i].max() + 0.1 * abs(coordinates[:, i].max()), - ] - for i in range(dimensionality) - ] - - if dimensionality < 3: - bbox.append([0.0, 1.0]) - if dimensionality < 2: - bbox.append([0.0, 1.0]) - - # handle pseudo-dims here - num_pseudo_dims = get_num_pseudo_dims(coordinates) - dimensionality -= num_pseudo_dims - for i in range(dimensionality, 3): - bbox[i][0] = 0.0 - bbox[i][1] = 1.0 - - bbox = np.array(bbox, dtype=np.float64) - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - field_units = {} - particle_types = {} - sfh = StreamDictFieldHandler() - - sfh.update({"connectivity": connectivity, "coordinates": coordinates}) - for i, d in enumerate(data): - _f_unit, _data, _ = process_data(d) - field_units.update(_f_unit) - sfh[i] = _data - particle_types.update(set_particle_types(d)) - # Simple check for axis length correctness - if 0 and len(data) > 0: - fn = list(sorted(data))[0] - array_values = data[fn] - if array_values.size != connectivity.shape[0]: - mylog.error( - "Dimensions of array must be one fewer than the coordinate set." - ) - raise RuntimeError - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - # I'm not sure we need any of this. 
- handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "UnstructuredMeshData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = dimensionality - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamUnstructuredMeshDataset( - handler, geometry=geometry, unit_system=unit_system - ) - - fluid_types = ["all"] - for i in range(1, num_meshes + 1): - fluid_types += ["connect%d" % i] - sds.fluid_types = tuple(fluid_types) - - def flatten(l): - return [item for sublist in l for item in sublist] - - sds._node_fields = flatten([[f[1] for f in m] for m in node_data if m]) - sds._elem_fields = flatten([[f[1] for f in m] for m in elem_data if m]) - sds.default_field = [f for f in sds.field_list if f[0] == "connect1"][-1] - - return sds diff --git a/yt/frontends/stream/definitions.py b/yt/frontends/stream/definitions.py index e69de29bb2d..a0dd2044271 100644 --- a/yt/frontends/stream/definitions.py +++ b/yt/frontends/stream/definitions.py @@ -0,0 +1,362 @@ +from collections import defaultdict + +import numpy as np + +from yt.funcs import iterable +from yt.geometry.grid_container import GridTree, MatchPointsToGrids +from yt.units.yt_array import uconcatenate +from yt.utilities.exceptions import ( + YTInconsistentGridFieldShape, + YTInconsistentGridFieldShapeGridDims, + YTInconsistentParticleFieldShape, +) +from yt.utilities.flagging_methods import FlaggingGrid +from yt.utilities.logger import ytLogger as mylog + +from .fields import StreamFieldInfo + + +def assign_particle_data(ds, pdata, bbox): + + """ + Assign particle data to the grids using MatchPointsToGrids. This + will overwrite any existing particle data, so be careful! + """ + + for ptype in ds.particle_types_raw: + check_fields = [(ptype, "particle_position_x"), (ptype, "particle_position")] + if all(f not in pdata for f in check_fields): + pdata_ftype = {} + for f in [k for k in sorted(pdata)]: + if not hasattr(pdata[f], "shape"): + continue + if f == "number_of_particles": + continue + mylog.debug("Reassigning '%s' to ('%s','%s')", f, ptype, f) + pdata_ftype[ptype, f] = pdata.pop(f) + pdata_ftype.update(pdata) + pdata = pdata_ftype + + # Note: what we need to do here is a bit tricky. Because occasionally this + # gets called before we property handle the field detection, we cannot use + # any information about the index. Fortunately for us, we can generate + # most of the GridTree utilizing information we already have from the + # stream handler. 
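# (A hedged illustration of the expected pdata layout; the field names are
#  assumptions. load_uniform_grid assembles roughly
#      {"number_of_particles": npart,
#       ("io", "particle_position_x"): x,
#       ("io", "particle_position_y"): y,
#       ("io", "particle_position_z"): z,
#       ("io", "particle_mass"): m}
#  before handing it to this function, i.e. flat per-particle arrays keyed by
#  (particle type, field name).)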
+ + if len(ds.stream_handler.fields) > 1: + pdata.pop("number_of_particles", None) + num_grids = len(ds.stream_handler.fields) + parent_ids = ds.stream_handler.parent_ids + num_children = np.zeros(num_grids, dtype="int64") + # We're going to do this the slow way + mask = np.empty(num_grids, dtype="bool") + for i in range(num_grids): + np.equal(parent_ids, i, mask) + num_children[i] = mask.sum() + levels = ds.stream_handler.levels.astype("int64").ravel() + grid_tree = GridTree( + num_grids, + ds.stream_handler.left_edges, + ds.stream_handler.right_edges, + ds.stream_handler.dimensions, + ds.stream_handler.parent_ids, + levels, + num_children, + ) + + grid_pdata = [] + for _ in range(num_grids): + grid = {"number_of_particles": 0} + grid_pdata.append(grid) + + for ptype in ds.particle_types_raw: + if (ptype, "particle_position_x") in pdata: + x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in "xyz") + elif (ptype, "particle_position") in pdata: + x, y, z = pdata[ptype, "particle_position"].T + else: + raise KeyError( + "Cannot decompose particle data without position fields!" + ) + pts = MatchPointsToGrids(grid_tree, len(x), x, y, z) + particle_grid_inds = pts.find_points_in_tree() + (assigned_particles,) = (particle_grid_inds >= 0).nonzero() + num_particles = particle_grid_inds.size + num_unassigned = num_particles - assigned_particles.size + if num_unassigned > 0: + eps = np.finfo(x.dtype).eps + s = np.array( + [ + [x.min() - eps, x.max() + eps], + [y.min() - eps, y.max() + eps], + [z.min() - eps, z.max() + eps], + ] + ) + sug_bbox = [ + [min(bbox[0, 0], s[0, 0]), max(bbox[0, 1], s[0, 1])], + [min(bbox[1, 0], s[1, 0]), max(bbox[1, 1], s[1, 1])], + [min(bbox[2, 0], s[2, 0]), max(bbox[2, 1], s[2, 1])], + ] + mylog.warning( + "Discarding %s particles (out of %s) that are outside " + "bounding box. 
Set bbox=%s to avoid this in the future.", + num_unassigned, + num_particles, + sug_bbox, + ) + particle_grid_inds = particle_grid_inds[assigned_particles] + x = x[assigned_particles] + y = y[assigned_particles] + z = z[assigned_particles] + idxs = np.argsort(particle_grid_inds) + particle_grid_count = np.bincount( + particle_grid_inds.astype("intp"), minlength=num_grids + ) + particle_indices = np.zeros(num_grids + 1, dtype="int64") + if num_grids > 1: + np.add.accumulate( + particle_grid_count.squeeze(), out=particle_indices[1:] + ) + else: + particle_indices[1] = particle_grid_count.squeeze() + for i, pcount in enumerate(particle_grid_count): + grid_pdata[i]["number_of_particles"] += pcount + start = particle_indices[i] + end = particle_indices[i + 1] + for key in pdata.keys(): + if key[0] == ptype: + grid_pdata[i][key] = pdata[key][idxs][start:end] + + else: + grid_pdata = [pdata] + + for pd, gi in zip(grid_pdata, sorted(ds.stream_handler.fields)): + ds.stream_handler.fields[gi].update(pd) + ds.stream_handler.particle_types.update(set_particle_types(pd)) + npart = ds.stream_handler.fields[gi].pop("number_of_particles", 0) + ds.stream_handler.particle_count[gi] = npart + + +def process_data(data, grid_dims=None): + new_data, field_units = {}, {} + for field, val in data.items(): + # val is a data array + if isinstance(val, np.ndarray): + # val is a YTArray + if hasattr(val, "units"): + field_units[field] = val.units + new_data[field] = val.copy().d + # val is a numpy array + else: + field_units[field] = "" + new_data[field] = val.copy() + + # val is a tuple of (data, units) + elif isinstance(val, tuple) and len(val) == 2: + try: + assert isinstance(field, (str, tuple)), "Field name is not a string!" + assert isinstance(val[0], np.ndarray), "Field data is not an ndarray!" + assert isinstance(val[1], str), "Unit specification is not a string!" + field_units[field] = val[1] + new_data[field] = val[0] + except AssertionError as e: + raise RuntimeError("The data dict appears to be invalid.\n" + str(e)) + + # val is a list of data to be turned into an array + elif iterable(val): + field_units[field] = "" + new_data[field] = np.asarray(val) + + else: + raise RuntimeError( + "The data dict appears to be invalid. " + "The data dictionary must map from field " + "names to (numpy array, unit spec) tuples. " + ) + + data = new_data + + # At this point, we have arrays for all our fields + new_data = {} + for field in data: + n_shape = len(data[field].shape) + if isinstance(field, tuple): + new_field = field + elif n_shape in (1, 2): + new_field = ("io", field) + elif n_shape == 3: + new_field = ("stream", field) + else: + raise RuntimeError + new_data[new_field] = data[field] + field_units[new_field] = field_units.pop(field) + known_fields = ( + StreamFieldInfo.known_particle_fields + StreamFieldInfo.known_other_fields + ) + # We do not want to override any of the known ones, if it's not + # overridden here. + if ( + any(f[0] == new_field[1] for f in known_fields) + and field_units[new_field] == "" + ): + field_units.pop(new_field) + data = new_data + # Sanity checking that all fields have the same dimensions. 
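# Taken end to end, this helper normalizes a raw field dict and reports the
# particle count; a small sketch (field names and shapes are only examples,
# and the import path is the one introduced by this patch):
import numpy as np
from yt.frontends.stream.definitions import process_data

raw = {
    "density": (np.random.random((8, 8, 8)), "g/cm**3"),   # 3D -> mesh field
    "particle_mass": np.ones(100),                          # 1D -> particle field
}
field_units, fields, n_particles = process_data(raw, grid_dims=(8, 8, 8))
# fields is re-keyed to ("stream", "density") and ("io", "particle_mass");
# field_units keeps the explicit "g/cm**3"; n_particles == 100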
+ g_shapes = [] + p_shapes = defaultdict(list) + for field in data: + f_shape = data[field].shape + n_shape = len(f_shape) + if n_shape in (1, 2): + p_shapes[field[0]].append((field[1], f_shape[0])) + elif n_shape == 3: + g_shapes.append((field, f_shape)) + if len(g_shapes) > 0: + g_s = np.array([s[1] for s in g_shapes]) + if not np.all(g_s == g_s[0]): + raise YTInconsistentGridFieldShape(g_shapes) + if grid_dims is not None: + if not np.all(g_s == grid_dims): + raise YTInconsistentGridFieldShapeGridDims(g_shapes, grid_dims) + if len(p_shapes) > 0: + for ptype, p_shape in p_shapes.items(): + p_s = np.array([s[1] for s in p_shape]) + if not np.all(p_s == p_s[0]): + raise YTInconsistentParticleFieldShape(ptype, p_shape) + # Now that we know the particle fields are consistent, determine the number + # of particles. + if len(p_shapes) > 0: + number_of_particles = np.sum([s[0][1] for s in p_shapes.values()]) + else: + number_of_particles = 0 + return field_units, data, number_of_particles + + +def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, callback=None): + r"""Given a base dataset, repeatedly apply refinement criteria and + fluid operators until a maximum level is reached. + + Parameters + ---------- + base_ds : ~yt.data_objects.static_output.Dataset + This is any static output. It can also be a stream static output, for + instance as returned by load_uniform_data. + refinement_critera : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod` + These criteria will be applied in sequence to identify cells that need + to be refined. + fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator` + These fluid operators will be applied in sequence to all resulting + grids. + max_level : int + The maximum level to which the data will be refined + callback : function, optional + A function that will be called at the beginning of each refinement + cycle, with the current dataset. + + Examples + -------- + >>> domain_dims = (32, 32, 32) + >>> data = np.zeros(domain_dims) + 0.25 + >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})] + >>> rc = [fm.flagging_method_registry["overdensity"](8.0)] + >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0) + >>> ds = refine_amr(ug, rc, fo, 5) + """ + from .loaders import load_amr_grids + + # If we have particle data, set it aside for now + + number_of_particles = np.sum( + [grid.NumberOfParticles for grid in base_ds.index.grids] + ) + + if number_of_particles > 0: + pdata = {} + for field in base_ds.field_list: + if not isinstance(field, tuple): + field = ("unknown", field) + fi = base_ds._get_field_info(*field) + if ( + fi.sampling_type == "particle" + and field[0] in base_ds.particle_types_raw + ): + pdata[field] = uconcatenate( + [grid[field] for grid in base_ds.index.grids] + ) + pdata["number_of_particles"] = number_of_particles + + last_gc = base_ds.index.num_grids + cur_gc = -1 + ds = base_ds + bbox = np.array( + [(ds.domain_left_edge[i], ds.domain_right_edge[i]) for i in range(3)] + ) + while ds.index.max_level < max_level and last_gc != cur_gc: + mylog.info("Refining another level. 
Current max level: %s", ds.index.max_level) + last_gc = ds.index.grids.size + for m in fluid_operators: + m.apply(ds) + if callback is not None: + callback(ds) + grid_data = [] + for g in ds.index.grids: + gd = dict( + left_edge=g.LeftEdge, + right_edge=g.RightEdge, + level=g.Level, + dimensions=g.ActiveDimensions, + ) + for field in ds.field_list: + if not isinstance(field, tuple): + field = ("unknown", field) + fi = ds._get_field_info(*field) + if not fi.sampling_type == "particle": + gd[field] = g[field] + grid_data.append(gd) + if g.Level < ds.index.max_level: + continue + fg = FlaggingGrid(g, refinement_criteria) + nsg = fg.find_subgrids() + for sg in nsg: + LE = sg.left_index * g.dds + ds.domain_left_edge + dims = sg.dimensions * ds.refine_by + grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims) + gd = dict( + left_edge=LE, + right_edge=grid.right_edge, + level=g.Level + 1, + dimensions=dims, + ) + for field in ds.field_list: + if not isinstance(field, tuple): + field = ("unknown", field) + fi = ds._get_field_info(*field) + if not fi.sampling_type == "particle": + gd[field] = grid[field] + grid_data.append(gd) + + ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox) + + ds.particle_types_raw = base_ds.particle_types_raw + ds.particle_types = ds.particle_types_raw + + # Now figure out where the particles go + if number_of_particles > 0: + # This will update the stream handler too + assign_particle_data(ds, pdata, bbox) + + cur_gc = ds.index.num_grids + + return ds + + +def set_particle_types(data): + particle_types = {} + for key in data.keys(): + if key == "number_of_particles": + continue + if len(data[key].shape) == 1: + particle_types[key] = True + else: + particle_types[key] = False + return particle_types diff --git a/yt/frontends/stream/loaders.py b/yt/frontends/stream/loaders.py new file mode 100644 index 00000000000..853c7a61d69 --- /dev/null +++ b/yt/frontends/stream/loaders.py @@ -0,0 +1,1103 @@ +import numpy as np + +from yt.frontends.exodus_ii.util import get_num_pseudo_dims +from yt.funcs import ensure_list, issue_deprecation_warning +from yt.utilities.decompose import decompose_array, get_psize +from yt.utilities.exceptions import YTIllDefinedAMR +from yt.utilities.lib.misc_utilities import get_box_grids_level +from yt.utilities.logger import ytLogger as mylog + +from .data_structures import ( + StreamDataset, + StreamDictFieldHandler, + StreamHandler, + StreamHexahedralDataset, + StreamOctreeDataset, + StreamParticlesDataset, + StreamUnstructuredMeshDataset, +) +from .definitions import assign_particle_data, process_data, set_particle_types + + +def load_uniform_grid( + data, + domain_dimensions, + length_unit=None, + bbox=None, + nprocs=1, + sim_time=0.0, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + unit_system="cgs", +): + r"""Load a uniform grid of data into yt as a + :class:`~yt.frontends.stream.data_structures.StreamHandler`. + + This should allow a uniform grid of data to be loaded directly into yt and + analyzed as would any others. This comes with several caveats: + + * Units will be incorrect unless the unit system is explicitly + specified. + * Some functions may behave oddly, and parallelism will be + disappointing or non-existent in most cases. + * Particles may be difficult to integrate. + + Particle fields are detected as one-dimensional fields. 
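    For instance, a plausible sketch mixing mesh and particle fields (the
    field names below are only examples):

        import numpy as np
        import yt

        arr = np.random.random((32, 32, 32))
        ppos = np.random.random((3, 1000))
        data = {
            "density": (arr, "g/cm**3"),
            "particle_position_x": (ppos[0], "code_length"),
            "particle_position_y": (ppos[1], "code_length"),
            "particle_position_z": (ppos[2], "code_length"),
        }
        ds = yt.load_uniform_grid(data, arr.shape, length_unit="cm", nprocs=4)

    The three one-dimensional position fields are siphoned off as particle
    data and assigned to the decomposed grids, while "density" stays a mesh
    field.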
+ + Parameters + ---------- + data : dict + This is a dict of numpy arrays or (numpy array, unit spec) tuples. + The keys are the field names. + domain_dimensions : array_like + This is the domain dimensions of the grid + length_unit : string + Unit to use for lengths. Defaults to unitless. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units specified by length_unit. + Defaults to a cubic unit-length domain. + nprocs: integer, optional + If greater than 1, will create this number of subarrays out of data + sim_time : float, optional + The simulation time in seconds + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + + Examples + -------- + + >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]]) + >>> arr = np.random.random((128, 128, 128)) + >>> data = dict(density=arr) + >>> ds = load_uniform_grid(data, arr.shape, length_unit='cm', + ... bbox=bbox, nprocs=12) + >>> dd = ds.all_data() + >>> dd['density'] + YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916, + 0.97506269, 0.03047113]) g/cm**3 + """ + + domain_dimensions = np.array(domain_dimensions) + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + # If someone included this throw it away--old API + if "number_of_particles" in data: + issue_deprecation_warning( + "It is no longer necessary to include " + "the number of particles in the data " + "dict. The number of particles is " + "determined from the sizes of the " + "particle fields." + ) + data.pop("number_of_particles") + # First we fix our field names, apply units to data + # and check for consistency of field shapes + field_units, data, number_of_particles = process_data( + data, grid_dims=tuple(domain_dimensions) + ) + + sfh = StreamDictFieldHandler() + + if number_of_particles > 0: + particle_types = set_particle_types(data) + # Used much further below. 
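# The nprocs > 1 branch below carves every mesh field into sub-bricks with
# helpers from yt.utilities.decompose; roughly (sketch, shapes are examples):
import numpy as np
from yt.utilities.decompose import decompose_array, get_psize

shape = (64, 64, 64)
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
psize = get_psize(np.array(shape), 8)            # e.g. a 2x2x2 split for 8 pieces
LEs, REs, shapes, slices = decompose_array(shape, psize, bbox)
# len(slices) == 8; data[key][slices[i]] is the brick handed to pseudo-grid i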
+ pdata = {"number_of_particles": number_of_particles} + for key in list(data.keys()): + if len(data[key].shape) == 1 or key[0] == "io": + if not isinstance(key, tuple): + field = ("io", key) + mylog.debug("Reassigning '%s' to '%s'", key, field) + else: + field = key + sfh._additional_fields += (field,) + pdata[field] = data.pop(key) + else: + particle_types = {} + + if nprocs > 1: + temp = {} + new_data = {} + for key in data.keys(): + psize = get_psize(np.array(data[key].shape), nprocs) + grid_left_edges, grid_right_edges, shapes, slices = decompose_array( + data[key].shape, psize, bbox + ) + grid_dimensions = np.array([shape for shape in shapes], dtype="int32") + temp[key] = [data[key][slice] for slice in slices] + for gid in range(nprocs): + new_data[gid] = {} + for key in temp.keys(): + new_data[gid].update({key: temp[key][gid]}) + sfh.update(new_data) + del new_data, temp + else: + sfh.update({0: data}) + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # particle count + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "UniformGridData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + if np.all(domain_dimensions[1:] == 1): + dimensionality = 1 + elif domain_dimensions[2] == 1: + dimensionality = 2 + else: + dimensionality = 3 + handler.dimensionality = dimensionality + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) + + # Now figure out where the particles go + if number_of_particles > 0: + # This will update the stream handler too + assign_particle_data(sds, pdata, bbox) + + return sds + + +def load_amr_grids( + grid_data, + domain_dimensions, + bbox=None, + sim_time=0.0, + length_unit=None, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + refine_by=2, + unit_system="cgs", +): + r"""Load a set of grids of data into yt as a + :class:`~yt.frontends.stream.data_structures.StreamHandler`. + This should allow a sequence of grids of varying resolution of data to be + loaded directly into yt and analyzed as would any others. This comes with + several caveats: + + * Units will be incorrect unless the unit system is explicitly specified. + * Some functions may behave oddly, and parallelism will be + disappointing or non-existent in most cases. + * Particles may be difficult to integrate. + * No consistency checks are performed on the index + + Parameters + ---------- + + grid_data : list of dicts + This is a list of dicts. Each dict must have entries "left_edge", + "right_edge", "dimensions", "level", and then any remaining entries are + assumed to be fields. 
Field entries must map to an NDArray. The grid_data + may also include a particle count. If no particle count is supplied, the + dataset is understood to contain no particles. The grid_data will be + modified in place and can't be assumed to be static. + domain_dimensions : array_like + This is the domain dimensions of the grid + length_unit : string or float + Unit to use for lengths. Defaults to unitless. If set to be a string, the bbox + dimensions are assumed to be in the corresponding units. If set to a float, the + value is a assumed to be the conversion from bbox dimensions to centimeters. + mass_unit : string or float + Unit to use for masses. Defaults to unitless. + time_unit : string or float + Unit to use for times. Defaults to unitless. + velocity_unit : string or float + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string or float + Unit to use for magnetic fields. Defaults to unitless. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units specified by length_unit. + Defaults to a cubic unit-length domain. + sim_time : float, optional + The simulation time in seconds + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + refine_by : integer or list/array of integers. + Specifies the refinement ratio between levels. Defaults to 2. This + can be an array, in which case it specifies for each dimension. For + instance, this can be used to say that some datasets have refinement of + 1 in one dimension, indicating that they span the full range in that + dimension. + + Examples + -------- + + >>> grid_data = [ + ... dict(left_edge = [0.0, 0.0, 0.0], + ... right_edge = [1.0, 1.0, 1.], + ... level = 0, + ... dimensions = [32, 32, 32], + ... number_of_particles = 0), + ... dict(left_edge = [0.25, 0.25, 0.25], + ... right_edge = [0.75, 0.75, 0.75], + ... level = 1, + ... dimensions = [32, 32, 32], + ... number_of_particles = 0) + ... ] + ... + >>> for g in grid_data: + ... g["density"] = (np.random.random(g["dimensions"])*2**g["level"], "g/cm**3") + ... 
+ >>> ds = load_amr_grids(grid_data, [32, 32, 32], length_unit=1.0) + """ + + domain_dimensions = np.array(domain_dimensions) + ngrids = len(grid_data) + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros((ngrids, 1), dtype="int32") + grid_left_edges = np.zeros((ngrids, 3), dtype="float64") + grid_right_edges = np.zeros((ngrids, 3), dtype="float64") + grid_dimensions = np.zeros((ngrids, 3), dtype="int32") + number_of_particles = np.zeros((ngrids, 1), dtype="int64") + parent_ids = np.zeros(ngrids, dtype="int64") - 1 + sfh = StreamDictFieldHandler() + for i, g in enumerate(grid_data): + grid_left_edges[i, :] = g.pop("left_edge") + grid_right_edges[i, :] = g.pop("right_edge") + grid_dimensions[i, :] = g.pop("dimensions") + grid_levels[i, :] = g.pop("level") + # If someone included this throw it away--old API + if "number_of_particles" in g: + issue_deprecation_warning( + "It is no longer necessary to include " + "the number of particles in the data " + "dict. The number of particles is " + "determined from the sizes of the " + "particle fields." + ) + g.pop("number_of_particles") + field_units, data, n_particles = process_data( + g, grid_dims=tuple(grid_dimensions[i, :]) + ) + number_of_particles[i, :] = n_particles + sfh[i] = data + + # We now reconstruct our parent ids, so that our particle assignment can + # proceed. + mask = np.empty(ngrids, dtype="int32") + for gi in range(ngrids): + get_box_grids_level( + grid_left_edges[gi, :], + grid_right_edges[gi, :], + grid_levels[gi] + 1, + grid_left_edges, + grid_right_edges, + grid_levels, + mask, + ) + ids = np.where(mask.astype("bool")) + for ci in ids: + parent_ids[ci] = gi + + # Check if the grid structure is properly aligned (bug #1295) + for lvl in range(grid_levels.min() + 1, grid_levels.max() + 1): + idx = grid_levels.flatten() == lvl + dims = domain_dimensions * refine_by ** (lvl - 1) + for iax, ax in enumerate("xyz"): + cell_edges = np.linspace( + domain_left_edge[iax], domain_right_edge[iax], dims[iax], endpoint=False + ) + if set(grid_left_edges[idx, iax]) - set(cell_edges): + raise YTIllDefinedAMR(lvl, ax) + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + particle_types = {} + + for grid in sfh.values(): + particle_types.update(set_particle_types(grid)) + + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + parent_ids, + number_of_particles, + np.zeros(ngrids).reshape((ngrids, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "AMRGridData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = refine_by + if np.all(domain_dimensions[1:] == 1): + dimensionality = 1 + elif domain_dimensions[2] == 1: + dimensionality = 2 + else: + dimensionality = 3 + handler.dimensionality = dimensionality + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) + return sds + + +def 
load_particles( + data, + length_unit=None, + bbox=None, + sim_time=None, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + unit_system="cgs", + data_source=None, +): + r"""Load a set of particles into yt as a + :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`. + + This will allow a collection of particle data to be loaded directly into + yt and analyzed as would any others. This comes with several caveats: + + * There must be sufficient space in memory to contain all the particle + data. + * Parallelism will be disappointing or non-existent in most cases. + * Fluid fields are not supported. + + Note: in order for the dataset to take advantage of SPH functionality, + the following two fields must be provided: + * ('io', 'density') + * ('io', 'smoothing_length') + + Parameters + ---------- + data : dict + This is a dict of numpy arrays or (numpy array, unit name) tuples, + where the keys are the field names. Particles positions must be named + "particle_position_x", "particle_position_y", and "particle_position_z". + length_unit : float + Conversion factor from simulation length units to centimeters + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of the length_unit + sim_time : float, optional + The simulation time in seconds + mass_unit : float + Conversion factor from simulation mass units to grams + time_unit : float + Conversion factor from simulation time units to seconds + velocity_unit : float + Conversion factor from simulation velocity units to cm/s + magnetic_unit : float + Conversion factor from simulation magnetic units to gauss + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + data_source : YTSelectionContainer, optional + If set, parameters like `bbox`, `sim_time`, and code units are derived + from it. + + Examples + -------- + + >>> pos = [np.random.random(128*128*128) for i in range(3)] + >>> data = dict(particle_position_x = pos[0], + ... particle_position_y = pos[1], + ... 
particle_position_z = pos[2]) + >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]]) + >>> ds = load_particles(data, 3.08e24, bbox=bbox) + + """ + + domain_dimensions = np.ones(3, "int32") + nprocs = 1 + + # Parse bounding box + if data_source is not None: + le, re = data_source.get_bbox() + le = le.to_value("code_length") + re = re.to_value("code_length") + bbox = list(zip(le, re)) + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + else: + bbox = np.array(bbox) + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + # Parse simulation time + if data_source is not None: + sim_time = data_source.ds.current_time + if sim_time is None: + sim_time = 0.0 + else: + sim_time = float(sim_time) + + # Parse units + def parse_unit(unit, dimension): + if unit is None: + unit = "code_" + dimension + if data_source is not None: + unit = getattr(data_source.ds, dimension + "_unit", unit) + return unit + + length_unit = parse_unit(length_unit, "length") + mass_unit = parse_unit(mass_unit, "mass") + time_unit = parse_unit(time_unit, "time") + velocity_unit = parse_unit(velocity_unit, "velocity") + magnetic_unit = parse_unit(magnetic_unit, "magnetic") + + # Preprocess data + field_units, data, _ = process_data(data) + sfh = StreamDictFieldHandler() + + pdata = {} + for key in data.keys(): + if not isinstance(key, tuple): + field = ("io", key) + mylog.debug("Reassigning '%s' to '%s'", key, field) + else: + field = key + pdata[field] = data[key] + sfh._additional_fields += (field,) + data = pdata # Drop reference count + particle_types = set_particle_types(data) + sfh.update({"stream_file": data}) + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + # I'm not sure we need any of this. + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "ParticleData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = 3 + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system) + + return sds + + +def load_hexahedral_mesh( + data, + connectivity, + coordinates, + length_unit=None, + bbox=None, + sim_time=0.0, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + unit_system="cgs", +): + r"""Load a hexahedral mesh of data into yt as a + :class:`~yt.frontends.stream.data_structures.StreamHandler`. + + This should allow a semistructured grid of data to be loaded directly into + yt and analyzed as would any others. This comes with several caveats: + + * Units will be incorrect unless the data has already been converted to + cgs. + * Some functions may behave oddly, and parallelism will be + disappointing or non-existent in most cases. + * Particles may be difficult to integrate. 
+ + Particle fields are detected as one-dimensional fields. The number of particles + is set by the "number_of_particles" key in data. + + Parameters + ---------- + data : dict + This is a dict of numpy arrays, where the keys are the field names. + There must only be one. Note that the data in the numpy arrays should + define the cell-averaged value for of the quantity in in the hexahedral + cell. + connectivity : array_like + This should be of size (N,8) where N is the number of zones. + coordinates : array_like + This should be of size (M,3) where M is the number of vertices + indicated in the connectivity matrix. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of the length unit. + sim_time : float, optional + The simulation time in seconds + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + + """ + + domain_dimensions = np.ones(3, "int32") * 2 + nprocs = 1 + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + field_units, data, _ = process_data(data) + sfh = StreamDictFieldHandler() + + particle_types = set_particle_types(data) + + sfh.update({"connectivity": connectivity, "coordinates": coordinates, 0: data}) + # Simple check for axis length correctness + if len(data) > 0: + fn = list(sorted(data))[0] + array_values = data[fn] + if array_values.size != connectivity.shape[0]: + mylog.error( + "Dimensions of array must be one fewer than the coordinate set." + ) + raise RuntimeError + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + # I'm not sure we need any of this. 
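# A minimal call to this loader might look like the following sketch. Note
# that yt.hexahedral_connectivity is not part of this diff and is assumed to
# return (coordinates, connectivity) for the given cell-edge arrays; the
# per-zone data layout (one value per connectivity row) is likewise an
# assumption consistent with the size check above.
import numpy as np
import yt

xgrid = ygrid = zgrid = np.linspace(0.0, 1.0, 5)            # 4 zones per axis
coords, conn = yt.hexahedral_connectivity(xgrid, ygrid, zgrid)
data = {"density": np.random.random(conn.shape[0])}         # one value per zone
bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]])
ds = yt.load_hexahedral_mesh(data, conn, coords, length_unit="cm", bbox=bbox)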
+ handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "HexahedralMeshData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = 3 + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system) + + return sds + + +def load_octree( + octree_mask, + data, + bbox=None, + sim_time=0.0, + length_unit=None, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + over_refine_factor=1, + partial_coverage=1, + unit_system="cgs", +): + r"""Load an octree mask into yt. + + Octrees can be saved out by calling save_octree on an OctreeContainer. + This enables them to be loaded back in. + + This will initialize an Octree of data. Note that fluid fields will not + work yet, or possibly ever. + + Parameters + ---------- + octree_mask : np.ndarray[uint8_t] + This is a depth-first refinement mask for an Octree. It should be + of size n_octs * 8 (but see note about the root oct below), where + each item is 1 for an oct-cell being refined and 0 for it not being + refined. For over_refine_factors != 1, the children count will + still be 8, so there will stil be n_octs * 8 entries. Note that if + the root oct is not refined, there will be only one entry + for the root, so the size of the mask will be (n_octs - 1)*8 + 1. + data : dict + A dictionary of 1D arrays. Note that these must of the size of the + number of "False" values in the ``octree_mask``. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of length + sim_time : float, optional + The simulation time in seconds + length_unit : string + Unit to use for lengths. Defaults to unitless. + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + partial_coverage : boolean + Whether or not an oct can be refined cell-by-cell, or whether all + 8 get refined. + + Example + ------- + + >>> import yt + >>> import numpy as np + >>> oct_mask = [8, 0, 0, 0, 0, 8, 0, 8, + ... 0, 0, 0, 0, 0, 0, 0, 0, + ... 8, 0, 0, 0, 0, 0, 0, 0, + ... 0] + >>> + >>> octree_mask = np.array(oct_mask, dtype=np.uint8) + >>> quantities = {} + >>> quantities['gas', 'density'] = np.random.random((22, 1), dtype='f8') + >>> bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]]) + >>> + >>> ds = yt.load_octree(octree_mask=octree_mask, + ... data=quantities, + ... bbox=bbox, + ... over_refine_factor=0, + ... 
partial_coverage=0) + + """ + + if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8: + raise TypeError("octree_mask should be a Numpy array with type uint8") + + nz = 1 << (over_refine_factor) + domain_dimensions = np.array([nz, nz, nz]) + nprocs = 1 + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + field_units, data, _ = process_data(data) + sfh = StreamDictFieldHandler() + + particle_types = set_particle_types(data) + + sfh.update({0: data}) + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + # I'm not sure we need any of this. + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "OctreeData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = 3 + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamOctreeDataset(handler, unit_system=unit_system) + sds.octree_mask = octree_mask + sds.partial_coverage = partial_coverage + sds.over_refine_factor = over_refine_factor + + return sds + + +def load_unstructured_mesh( + connectivity, + coordinates, + node_data=None, + elem_data=None, + length_unit=None, + bbox=None, + sim_time=0.0, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(False, False, False), + geometry="cartesian", + unit_system="cgs", +): + r"""Load an unstructured mesh of data into yt as a + :class:`~yt.frontends.stream.data_structures.StreamHandler`. + + This should allow an unstructured mesh data to be loaded directly into + yt and analyzed as would any others. Not all functionality for + visualization will be present, and some analysis functions may not yet have + been implemented. + + Particle fields are detected as one-dimensional fields. The number of + particles is set by the "number_of_particles" key in data. + + In the parameter descriptions below, a "vertex" is a 3D point in space, an + "element" is a single polyhedron whose location is defined by a set of + vertices, and a "mesh" is a set of polyhedral elements, each with the same + number of vertices. + + Parameters + ---------- + + connectivity : list of array_like or array_like + This should either be a single 2D array or list of 2D arrays. If this + is a list, each element in the list corresponds to the connectivity + information for a distinct mesh. Each array can have different + connectivity length and should be of shape (N,M) where N is the number + of elements and M is the number of vertices per element. 
+ coordinates : array_like + The 3D coordinates of mesh vertices. This should be of size (L, D) where + L is the number of vertices and D is the number of coordinates per vertex + (the spatial dimensions of the dataset). Currently this must be either 2 or 3. + When loading more than one mesh, the data for each mesh should be concatenated + into a single coordinates array. + node_data : dict or list of dicts + For a single mesh, a dict mapping field names to 2D numpy arrays, + representing data defined at element vertices. For multiple meshes, + this must be a list of dicts. Note that these are not the values as a + function of the coordinates, but of the connectivity. Their shape + should be the same as the connectivity. This means that if the data is + in the shape of the coordinates, you may need to reshape them using the + `connectivity` array as an index. + elem_data : dict or list of dicts + For a single mesh, a dict mapping field names to 1D numpy arrays, where + each array has a length equal to the number of elements. The data + must be defined at the center of each mesh element and there must be + only one data value for each element. For multiple meshes, this must be + a list of dicts, with one dict for each mesh. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of the length unit. + sim_time : float, optional + The simulation time in seconds + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + + Examples + -------- + + Load a simple mesh consisting of two tets. + + >>> # Coordinates for vertices of two tetrahedra + >>> coordinates = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5], + ... [0.5, 1, 0.5], [0.5, 0.5, 0.0], + ... [0.5, 0.5, 1.0]]) + >>> # The indices in the coordinates array of mesh vertices. + >>> # This mesh has two elements. + >>> connectivity = np.array([[0, 1, 2, 4], [0, 1, 2, 3]]) + >>> + >>> # Field data defined at the centers of the two mesh elements. + >>> elem_data = { + ... ('connect1', 'elem_field'): np.array([1, 2]) + ... } + >>> + >>> # Field data defined at node vertices + >>> node_data = { + ... ('connect1', 'node_field'): np.array([[0.0, 1.0, 2.0, 4.0], + ... [0.0, 1.0, 2.0, 3.0]]) + ... } + >>> + >>> ds = yt.load_unstructured_mesh(connectivity, coordinates, + ... elem_data=elem_data, + ... 
node_data=node_data) + """ + + dimensionality = coordinates.shape[1] + domain_dimensions = np.ones(3, "int32") * 2 + nprocs = 1 + + if elem_data is None and node_data is None: + raise RuntimeError("No data supplied in load_unstructured_mesh.") + + if isinstance(connectivity, list): + num_meshes = len(connectivity) + else: + num_meshes = 1 + connectivity = ensure_list(connectivity) + + if elem_data is None: + elem_data = [{} for i in range(num_meshes)] + elem_data = ensure_list(elem_data) + + if node_data is None: + node_data = [{} for i in range(num_meshes)] + node_data = ensure_list(node_data) + + data = [{} for i in range(num_meshes)] + for elem_dict, data_dict in zip(elem_data, data): + for field, values in elem_dict.items(): + data_dict[field] = values + for node_dict, data_dict in zip(node_data, data): + for field, values in node_dict.items(): + data_dict[field] = values + data = ensure_list(data) + + if bbox is None: + bbox = [ + [ + coordinates[:, i].min() - 0.1 * abs(coordinates[:, i].min()), + coordinates[:, i].max() + 0.1 * abs(coordinates[:, i].max()), + ] + for i in range(dimensionality) + ] + + if dimensionality < 3: + bbox.append([0.0, 1.0]) + if dimensionality < 2: + bbox.append([0.0, 1.0]) + + # handle pseudo-dims here + num_pseudo_dims = get_num_pseudo_dims(coordinates) + dimensionality -= num_pseudo_dims + for i in range(dimensionality, 3): + bbox[i][0] = 0.0 + bbox[i][1] = 1.0 + + bbox = np.array(bbox, dtype=np.float64) + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + field_units = {} + particle_types = {} + sfh = StreamDictFieldHandler() + + sfh.update({"connectivity": connectivity, "coordinates": coordinates}) + for i, d in enumerate(data): + _f_unit, _data, _ = process_data(d) + field_units.update(_f_unit) + sfh[i] = _data + particle_types.update(set_particle_types(d)) + # Simple check for axis length correctness + if 0 and len(data) > 0: + fn = list(sorted(data))[0] + array_values = data[fn] + if array_values.size != connectivity.shape[0]: + mylog.error( + "Dimensions of array must be one fewer than the coordinate set." + ) + raise RuntimeError + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + # I'm not sure we need any of this. 
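# For reference, the bbox fallback above pads the coordinate extent by 10% of
# |min| / |max| on each spatial axis and fills any remaining (pseudo) axis
# with [0, 1]; e.g. for a 2D mesh with x in [0.5, 4.0] and y in [-2.0, 3.0]:
import numpy as np
default_bbox = np.array([
    [0.5 - 0.1 * 0.5, 4.0 + 0.1 * 4.0],    # [0.45, 4.4]
    [-2.0 - 0.1 * 2.0, 3.0 + 0.1 * 3.0],   # [-2.2, 3.3]
    [0.0, 1.0],                            # filled third axis
])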
+ handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "UnstructuredMeshData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = dimensionality + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamUnstructuredMeshDataset( + handler, geometry=geometry, unit_system=unit_system + ) + + fluid_types = ["all"] + for i in range(1, num_meshes + 1): + fluid_types += ["connect%d" % i] + sds.fluid_types = tuple(fluid_types) + + def flatten(l): + return [item for sublist in l for item in sublist] + + sds._node_fields = flatten([[f[1] for f in m] for m in node_data if m]) + sds._elem_fields = flatten([[f[1] for f in m] for m in elem_data if m]) + sds.default_field = [f for f in sds.field_list if f[0] == "connect1"][-1] + + return sds diff --git a/yt/frontends/stream/tests/test_outputs.py b/yt/frontends/stream/tests/test_outputs.py index e9fe9f39204..74b8fbfcc02 100644 --- a/yt/frontends/stream/tests/test_outputs.py +++ b/yt/frontends/stream/tests/test_outputs.py @@ -5,7 +5,7 @@ import numpy as np -from yt.frontends.stream.data_structures import load_particles, load_uniform_grid +from yt.frontends.stream.loaders import load_particles, load_uniform_grid from yt.testing import assert_equal, assert_raises from yt.utilities.exceptions import ( YTInconsistentGridFieldShape, diff --git a/yt/loaders.py b/yt/loaders.py index 5315668520c..8808fef2964 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -5,8 +5,8 @@ # note: in the future, functions could be moved here instead # in which case, this file should be removed from flake8 ignore list in setup.cfg -from .convenience import load, load_simulation -from .frontends.stream.api import ( +from .convenience import load, load_simulation, simulation +from .frontends.stream.loaders import ( load_amr_grids, load_hexahedral_mesh, load_octree, diff --git a/yt/utilities/tests/test_particle_generator.py b/yt/utilities/tests/test_particle_generator.py index 9a4c73b85a0..f0aa01a800a 100644 --- a/yt/utilities/tests/test_particle_generator.py +++ b/yt/utilities/tests/test_particle_generator.py @@ -2,7 +2,8 @@ import yt.utilities.flagging_methods as fm import yt.utilities.initial_conditions as ic -from yt.loaders import load_uniform_grid, refine_amr +from yt.frontends.stream.definitions import refine_amr +from yt.loaders import load_uniform_grid from yt.testing import assert_almost_equal, assert_equal from yt.units.yt_array import uconcatenate from yt.utilities.particle_generator import ( From 901b5dbc3dfbbce85e267f79e307155b3e0a8cec Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 9 Aug 2020 23:01:50 +0200 Subject: [PATCH 490/653] update docstring --- yt/frontends/stream/definitions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/stream/definitions.py b/yt/frontends/stream/definitions.py index a0dd2044271..5a596457c02 100644 --- a/yt/frontends/stream/definitions.py +++ b/yt/frontends/stream/definitions.py @@ -240,7 +240,7 @@ def refine_amr(base_ds, refinement_criteria, fluid_operators, 
max_level, callbac ---------- base_ds : ~yt.data_objects.static_output.Dataset This is any static output. It can also be a stream static output, for - instance as returned by load_uniform_data. + instance as returned by `yt.loaders.load_uniform_data`. refinement_critera : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod` These criteria will be applied in sequence to identify cells that need to be refined. From 15ef2b327b81135ec51801214cd3070077066d6d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 10:48:20 +0200 Subject: [PATCH 491/653] make refine_amr a method of the StreamDataset class --- yt/frontends/stream/data_structures.py | 131 ++++++++++++++++++++++++- yt/frontends/stream/definitions.py | 124 ++--------------------- 2 files changed, 137 insertions(+), 118 deletions(-) diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index babda0af5b6..e883d332334 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -21,7 +21,8 @@ from yt.geometry.oct_container import OctreeContainer from yt.geometry.oct_geometry_handler import OctreeIndex from yt.geometry.unstructured_mesh_handler import UnstructuredIndex -from yt.units.yt_array import YTQuantity +from yt.units.yt_array import YTQuantity, uconcatenate +from yt.utilities.flagging_methods import FlaggingGrid from yt.utilities.io_handler import io_registry from yt.utilities.lib.cykdtree import PyKDTree from yt.utilities.lib.misc_utilities import get_box_grids_level @@ -31,7 +32,7 @@ ) from yt.utilities.logger import ytLogger as mylog -from .definitions import process_data, set_particle_types +from .definitions import assign_particle_data, process_data, set_particle_types from .fields import StreamFieldInfo @@ -351,6 +352,132 @@ def _find_particle_types(self): self.particle_types = tuple(particle_types) self.particle_types_raw = self.particle_types + def refine_amr( + self, refinement_criteria, fluid_operators, max_level, callback=None + ): + r"""Given a base dataset, repeatedly apply refinement criteria and + fluid operators until a maximum level is reached. + + Parameters + ---------- + self : ~yt.data_objects.static_output.Dataset + This is any static output. It can also be a stream static output, for + instance as returned by `yt.loaders.load_uniform_data`. + refinement_critera : + list of :class:`~yt.utilities.flagging_methods.FlaggingMethod` + These criteria will be applied in sequence to identify cells that need + to be refined. + fluid_operators : + list of :class:`~yt.utilities.initial_conditions.FluidOperator` + These fluid operators will be applied in sequence to all resulting + grids. + max_level : int + The maximum level to which the data will be refined + callback : function, optional + A function that will be called at the beginning of each refinement + cycle, with the current dataset. 
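        A minimal callback might just report progress, e.g. (the function
        name here is arbitrary):

            def report_progress(intermediate_ds):
                print(intermediate_ds.index.max_level, intermediate_ds.index.num_grids)

        which would then be passed as
        ``ug.refine_amr(rc, fo, 5, callback=report_progress)``.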
+ + Examples + -------- + >>> domain_dims = (32, 32, 32) + >>> data = np.zeros(domain_dims) + 0.25 + >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})] + >>> rc = [fm.flagging_method_registry["overdensity"](8.0)] + >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0) + >>> ds = ug.refine_amr(rc, fo, 5) + """ + from .loaders import load_amr_grids + + # If we have particle data, set it aside for now + + number_of_particles = np.sum( + [grid.NumberOfParticles for grid in self.index.grids] + ) + + if number_of_particles > 0: + pdata = {} + for field in self.field_list: + if not isinstance(field, tuple): + field = ("unknown", field) + fi = self._get_field_info(*field) + if ( + fi.sampling_type == "particle" + and field[0] in self.particle_types_raw + ): + pdata[field] = uconcatenate( + [grid[field] for grid in self.index.grids] + ) + pdata["number_of_particles"] = number_of_particles + + last_gc = self.index.num_grids + cur_gc = -1 + ds = self + bbox = np.array( + [(ds.domain_left_edge[i], ds.domain_right_edge[i]) for i in range(3)] + ) + while ds.index.max_level < max_level and last_gc != cur_gc: + mylog.info( + "Refining another level. Current max level: %s", ds.index.max_level + ) + last_gc = ds.index.grids.size + for m in fluid_operators: + m.apply(ds) + if callback is not None: + callback(ds) + grid_data = [] + for g in ds.index.grids: + gd = dict( + left_edge=g.LeftEdge, + right_edge=g.RightEdge, + level=g.Level, + dimensions=g.ActiveDimensions, + ) + for field in ds.field_list: + if not isinstance(field, tuple): + field = ("unknown", field) + fi = ds._get_field_info(*field) + if not fi.sampling_type == "particle": + gd[field] = g[field] + grid_data.append(gd) + if g.Level < ds.index.max_level: + continue + fg = FlaggingGrid(g, refinement_criteria) + nsg = fg.find_subgrids() + for sg in nsg: + LE = sg.left_index * g.dds + ds.domain_left_edge + dims = sg.dimensions * ds.refine_by + grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims) + gd = dict( + left_edge=LE, + right_edge=grid.right_edge, + level=g.Level + 1, + dimensions=dims, + ) + for field in ds.field_list: + if not isinstance(field, tuple): + field = ("unknown", field) + fi = ds._get_field_info(*field) + if not fi.sampling_type == "particle": + gd[field] = grid[field] + grid_data.append(gd) + + # todo : use StreamDataset.__init__() instead of this loader function + # this is a fast way to build a copy of the dataset + # but a lot of data is lost on the way + ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox) + + ds.particle_types_raw = self.particle_types_raw + ds.particle_types = ds.particle_types_raw + + # Now figure out where the particles go + if number_of_particles > 0: + # This will update the stream handler too + assign_particle_data(ds, pdata, bbox) + + cur_gc = ds.index.num_grids + + return ds + class StreamDictFieldHandler(dict): _additional_fields = () diff --git a/yt/frontends/stream/definitions.py b/yt/frontends/stream/definitions.py index 5a596457c02..bad08175592 100644 --- a/yt/frontends/stream/definitions.py +++ b/yt/frontends/stream/definitions.py @@ -2,15 +2,13 @@ import numpy as np -from yt.funcs import iterable +from yt.funcs import issue_deprecation_warning, iterable from yt.geometry.grid_container import GridTree, MatchPointsToGrids -from yt.units.yt_array import uconcatenate from yt.utilities.exceptions import ( YTInconsistentGridFieldShape, YTInconsistentGridFieldShapeGridDims, YTInconsistentParticleFieldShape, ) -from yt.utilities.flagging_methods 
import FlaggingGrid from yt.utilities.logger import ytLogger as mylog from .fields import StreamFieldInfo @@ -233,121 +231,15 @@ def process_data(data, grid_dims=None): def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, callback=None): - r"""Given a base dataset, repeatedly apply refinement criteria and - fluid operators until a maximum level is reached. - - Parameters - ---------- - base_ds : ~yt.data_objects.static_output.Dataset - This is any static output. It can also be a stream static output, for - instance as returned by `yt.loaders.load_uniform_data`. - refinement_critera : list of :class:`~yt.utilities.flagging_methods.FlaggingMethod` - These criteria will be applied in sequence to identify cells that need - to be refined. - fluid_operators : list of :class:`~yt.utilities.initial_conditions.FluidOperator` - These fluid operators will be applied in sequence to all resulting - grids. - max_level : int - The maximum level to which the data will be refined - callback : function, optional - A function that will be called at the beginning of each refinement - cycle, with the current dataset. - - Examples - -------- - >>> domain_dims = (32, 32, 32) - >>> data = np.zeros(domain_dims) + 0.25 - >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})] - >>> rc = [fm.flagging_method_registry["overdensity"](8.0)] - >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0) - >>> ds = refine_amr(ug, rc, fo, 5) - """ - from .loaders import load_amr_grids - - # If we have particle data, set it aside for now - - number_of_particles = np.sum( - [grid.NumberOfParticles for grid in base_ds.index.grids] + issue_deprecation_warning( + "yt.refine_amr is now a method of the StreamDataset class" ) - - if number_of_particles > 0: - pdata = {} - for field in base_ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = base_ds._get_field_info(*field) - if ( - fi.sampling_type == "particle" - and field[0] in base_ds.particle_types_raw - ): - pdata[field] = uconcatenate( - [grid[field] for grid in base_ds.index.grids] - ) - pdata["number_of_particles"] = number_of_particles - - last_gc = base_ds.index.num_grids - cur_gc = -1 - ds = base_ds - bbox = np.array( - [(ds.domain_left_edge[i], ds.domain_right_edge[i]) for i in range(3)] + return base_ds.refine_amr( + refinement_criteria=refinement_criteria, + fluid_operators=fluid_operators, + max_level=max_level, + callback=callback, ) - while ds.index.max_level < max_level and last_gc != cur_gc: - mylog.info("Refining another level. 
Current max level: %s", ds.index.max_level) - last_gc = ds.index.grids.size - for m in fluid_operators: - m.apply(ds) - if callback is not None: - callback(ds) - grid_data = [] - for g in ds.index.grids: - gd = dict( - left_edge=g.LeftEdge, - right_edge=g.RightEdge, - level=g.Level, - dimensions=g.ActiveDimensions, - ) - for field in ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = ds._get_field_info(*field) - if not fi.sampling_type == "particle": - gd[field] = g[field] - grid_data.append(gd) - if g.Level < ds.index.max_level: - continue - fg = FlaggingGrid(g, refinement_criteria) - nsg = fg.find_subgrids() - for sg in nsg: - LE = sg.left_index * g.dds + ds.domain_left_edge - dims = sg.dimensions * ds.refine_by - grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims) - gd = dict( - left_edge=LE, - right_edge=grid.right_edge, - level=g.Level + 1, - dimensions=dims, - ) - for field in ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = ds._get_field_info(*field) - if not fi.sampling_type == "particle": - gd[field] = grid[field] - grid_data.append(gd) - - ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox) - - ds.particle_types_raw = base_ds.particle_types_raw - ds.particle_types = ds.particle_types_raw - - # Now figure out where the particles go - if number_of_particles > 0: - # This will update the stream handler too - assign_particle_data(ds, pdata, bbox) - - cur_gc = ds.index.num_grids - - return ds def set_particle_types(data): From a74d093a1cb88dde4d43e0d6dace5f5e90bb9985 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 11:01:40 +0200 Subject: [PATCH 492/653] switch to the new method syntax for StreamDataset.refine_amr in tests and docstrings --- yt/frontends/stream/tests/test_stream_particles.py | 13 ++++--------- yt/utilities/tests/test_particle_generator.py | 3 +-- 2 files changed, 5 insertions(+), 11 deletions(-) diff --git a/yt/frontends/stream/tests/test_stream_particles.py b/yt/frontends/stream/tests/test_stream_particles.py index 8ec84d08145..c675c9f36ac 100644 --- a/yt/frontends/stream/tests/test_stream_particles.py +++ b/yt/frontends/stream/tests/test_stream_particles.py @@ -2,12 +2,7 @@ import yt.utilities.flagging_methods as fm import yt.utilities.initial_conditions as ic -from yt.frontends.stream.api import ( - load_amr_grids, - load_particles, - load_uniform_grid, - refine_amr, -) +from yt.frontends.stream.api import load_amr_grids, load_particles, load_uniform_grid from yt.testing import assert_equal, fake_particle_ds, fake_sph_orientation_ds # Field information @@ -32,7 +27,7 @@ def test_stream_particles(): # Check that all of this runs ok without particles ug0 = load_uniform_grid({"density": dens}, domain_dims, 1.0, nprocs=8) - amr0 = refine_amr(ug0, rc, fo, 3) + amr0 = ug0.refine_amr(rc, fo, 3) grid_data = [] @@ -116,7 +111,7 @@ def test_stream_particles(): # Now refine this - amr1 = refine_amr(ug1, rc, fo, 3) + amr1 = ug1.refine_amr(rc, fo, 3) for field in sorted(ug1.field_list): assert field in amr1.field_list @@ -263,7 +258,7 @@ def test_stream_particles(): # Now refine this - amr3 = refine_amr(ug3, rc, fo, 3) + amr3 = ug3.refine_amr(rc, fo, 3) for field in sorted(ug3.field_list): assert field in amr3.field_list diff --git a/yt/utilities/tests/test_particle_generator.py b/yt/utilities/tests/test_particle_generator.py index f0aa01a800a..9ad90880d8e 100644 --- a/yt/utilities/tests/test_particle_generator.py +++ 
b/yt/utilities/tests/test_particle_generator.py @@ -2,7 +2,6 @@ import yt.utilities.flagging_methods as fm import yt.utilities.initial_conditions as ic -from yt.frontends.stream.definitions import refine_amr from yt.loaders import load_uniform_grid from yt.testing import assert_almost_equal, assert_equal from yt.units.yt_array import uconcatenate @@ -22,7 +21,7 @@ def test_particle_generator(): ug = load_uniform_grid(fields, domain_dims, 1.0) fo = [ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})] rc = [fm.flagging_method_registry["overdensity"](4.0)] - ds = refine_amr(ug, rc, fo, 3) + ds = ug.refine_amr(rc, fo, 3) # Now generate particles from density From 18f37bfaff757c8b4459651771c55ed3c62d2363 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 16:23:53 +0200 Subject: [PATCH 493/653] remove refine_amr method completely --- yt/frontends/stream/api.py | 1 - yt/frontends/stream/data_structures.py | 131 +------------ yt/frontends/stream/definitions.py | 14 +- .../stream/tests/test_stream_particles.py | 172 +----------------- yt/utilities/tests/test_particle_generator.py | 7 +- 5 files changed, 6 insertions(+), 319 deletions(-) diff --git a/yt/frontends/stream/api.py b/yt/frontends/stream/api.py index 14d10008df5..9e3b2364954 100644 --- a/yt/frontends/stream/api.py +++ b/yt/frontends/stream/api.py @@ -6,7 +6,6 @@ StreamHierarchy, hexahedral_connectivity, ) -from .definitions import refine_amr from .fields import StreamFieldInfo from .io import IOHandlerStream from .loaders import ( diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index e883d332334..8004a2870a3 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -21,8 +21,7 @@ from yt.geometry.oct_container import OctreeContainer from yt.geometry.oct_geometry_handler import OctreeIndex from yt.geometry.unstructured_mesh_handler import UnstructuredIndex -from yt.units.yt_array import YTQuantity, uconcatenate -from yt.utilities.flagging_methods import FlaggingGrid +from yt.units import YTQuantity from yt.utilities.io_handler import io_registry from yt.utilities.lib.cykdtree import PyKDTree from yt.utilities.lib.misc_utilities import get_box_grids_level @@ -32,7 +31,7 @@ ) from yt.utilities.logger import ytLogger as mylog -from .definitions import assign_particle_data, process_data, set_particle_types +from .definitions import process_data, set_particle_types from .fields import StreamFieldInfo @@ -352,132 +351,6 @@ def _find_particle_types(self): self.particle_types = tuple(particle_types) self.particle_types_raw = self.particle_types - def refine_amr( - self, refinement_criteria, fluid_operators, max_level, callback=None - ): - r"""Given a base dataset, repeatedly apply refinement criteria and - fluid operators until a maximum level is reached. - - Parameters - ---------- - self : ~yt.data_objects.static_output.Dataset - This is any static output. It can also be a stream static output, for - instance as returned by `yt.loaders.load_uniform_data`. - refinement_critera : - list of :class:`~yt.utilities.flagging_methods.FlaggingMethod` - These criteria will be applied in sequence to identify cells that need - to be refined. - fluid_operators : - list of :class:`~yt.utilities.initial_conditions.FluidOperator` - These fluid operators will be applied in sequence to all resulting - grids. 
- max_level : int - The maximum level to which the data will be refined - callback : function, optional - A function that will be called at the beginning of each refinement - cycle, with the current dataset. - - Examples - -------- - >>> domain_dims = (32, 32, 32) - >>> data = np.zeros(domain_dims) + 0.25 - >>> fo = [ic.CoredSphere(0.05, 0.3, [0.7,0.4,0.75], {"Density": (0.25, 100.0)})] - >>> rc = [fm.flagging_method_registry["overdensity"](8.0)] - >>> ug = load_uniform_grid({'Density': data}, domain_dims, 1.0) - >>> ds = ug.refine_amr(rc, fo, 5) - """ - from .loaders import load_amr_grids - - # If we have particle data, set it aside for now - - number_of_particles = np.sum( - [grid.NumberOfParticles for grid in self.index.grids] - ) - - if number_of_particles > 0: - pdata = {} - for field in self.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = self._get_field_info(*field) - if ( - fi.sampling_type == "particle" - and field[0] in self.particle_types_raw - ): - pdata[field] = uconcatenate( - [grid[field] for grid in self.index.grids] - ) - pdata["number_of_particles"] = number_of_particles - - last_gc = self.index.num_grids - cur_gc = -1 - ds = self - bbox = np.array( - [(ds.domain_left_edge[i], ds.domain_right_edge[i]) for i in range(3)] - ) - while ds.index.max_level < max_level and last_gc != cur_gc: - mylog.info( - "Refining another level. Current max level: %s", ds.index.max_level - ) - last_gc = ds.index.grids.size - for m in fluid_operators: - m.apply(ds) - if callback is not None: - callback(ds) - grid_data = [] - for g in ds.index.grids: - gd = dict( - left_edge=g.LeftEdge, - right_edge=g.RightEdge, - level=g.Level, - dimensions=g.ActiveDimensions, - ) - for field in ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = ds._get_field_info(*field) - if not fi.sampling_type == "particle": - gd[field] = g[field] - grid_data.append(gd) - if g.Level < ds.index.max_level: - continue - fg = FlaggingGrid(g, refinement_criteria) - nsg = fg.find_subgrids() - for sg in nsg: - LE = sg.left_index * g.dds + ds.domain_left_edge - dims = sg.dimensions * ds.refine_by - grid = ds.smoothed_covering_grid(g.Level + 1, LE, dims) - gd = dict( - left_edge=LE, - right_edge=grid.right_edge, - level=g.Level + 1, - dimensions=dims, - ) - for field in ds.field_list: - if not isinstance(field, tuple): - field = ("unknown", field) - fi = ds._get_field_info(*field) - if not fi.sampling_type == "particle": - gd[field] = grid[field] - grid_data.append(gd) - - # todo : use StreamDataset.__init__() instead of this loader function - # this is a fast way to build a copy of the dataset - # but a lot of data is lost on the way - ds = load_amr_grids(grid_data, ds.domain_dimensions, bbox=bbox) - - ds.particle_types_raw = self.particle_types_raw - ds.particle_types = ds.particle_types_raw - - # Now figure out where the particles go - if number_of_particles > 0: - # This will update the stream handler too - assign_particle_data(ds, pdata, bbox) - - cur_gc = ds.index.num_grids - - return ds - class StreamDictFieldHandler(dict): _additional_fields = () diff --git a/yt/frontends/stream/definitions.py b/yt/frontends/stream/definitions.py index bad08175592..659f0a35b55 100644 --- a/yt/frontends/stream/definitions.py +++ b/yt/frontends/stream/definitions.py @@ -2,7 +2,7 @@ import numpy as np -from yt.funcs import issue_deprecation_warning, iterable +from yt.funcs import iterable from yt.geometry.grid_container import GridTree, MatchPointsToGrids from 
yt.utilities.exceptions import ( YTInconsistentGridFieldShape, @@ -230,18 +230,6 @@ def process_data(data, grid_dims=None): return field_units, data, number_of_particles -def refine_amr(base_ds, refinement_criteria, fluid_operators, max_level, callback=None): - issue_deprecation_warning( - "yt.refine_amr is now a method of the StreamDataset class" - ) - return base_ds.refine_amr( - refinement_criteria=refinement_criteria, - fluid_operators=fluid_operators, - max_level=max_level, - callback=callback, - ) - - def set_particle_types(data): particle_types = {} for key in data.keys(): diff --git a/yt/frontends/stream/tests/test_stream_particles.py b/yt/frontends/stream/tests/test_stream_particles.py index c675c9f36ac..1835fcf8fb8 100644 --- a/yt/frontends/stream/tests/test_stream_particles.py +++ b/yt/frontends/stream/tests/test_stream_particles.py @@ -1,8 +1,7 @@ import numpy as np -import yt.utilities.flagging_methods as fm import yt.utilities.initial_conditions as ic -from yt.frontends.stream.api import load_amr_grids, load_particles, load_uniform_grid +from yt.frontends.stream.api import load_particles, load_uniform_grid from yt.testing import assert_equal, fake_particle_ds, fake_sph_orientation_ds # Field information @@ -22,31 +21,8 @@ def test_stream_particles(): fo = [] fo.append(ic.TopHatSphere(0.1, [0.2, 0.3, 0.4], {"density": 2.0})) fo.append(ic.TopHatSphere(0.05, [0.7, 0.4, 0.75], {"density": 20.0})) - rc = [fm.flagging_method_registry["overdensity"](1.0)] - # Check that all of this runs ok without particles - - ug0 = load_uniform_grid({"density": dens}, domain_dims, 1.0, nprocs=8) - amr0 = ug0.refine_amr(rc, fo, 3) - - grid_data = [] - - for grid in amr0.index.grids: - - data = dict( - left_edge=grid.LeftEdge, - right_edge=grid.RightEdge, - level=grid.Level, - dimensions=grid.ActiveDimensions, - ) - - for field in amr0.field_list: - data[field] = grid[field] - grid_data.append(data) - - amr0 = load_amr_grids(grid_data, domain_dims) - - # Now add particles + # Add particles fields1 = { "density": dens, @@ -109,75 +85,6 @@ def test_stream_particles(): assert ug2._get_field_info(ptype, "particle_mass").sampling_type == "particle" assert not ug2._get_field_info("gas", "density").sampling_type == "particle" - # Now refine this - - amr1 = ug1.refine_amr(rc, fo, 3) - for field in sorted(ug1.field_list): - assert field in amr1.field_list - - grid_data = [] - - for grid in amr1.index.grids: - - data = dict( - left_edge=grid.LeftEdge, - right_edge=grid.RightEdge, - level=grid.Level, - dimensions=grid.ActiveDimensions, - ) - - for field in amr1.field_list: - if field[0] not in ("all", "nbody"): - data[field] = grid[field] - - grid_data.append(data) - - amr2 = load_amr_grids(grid_data, domain_dims) - - # Check everything again - - number_of_particles1 = [grid.NumberOfParticles for grid in amr1.index.grids] - number_of_particles2 = [grid.NumberOfParticles for grid in amr2.index.grids] - - assert_equal(np.sum(number_of_particles1), num_particles) - assert_equal(number_of_particles1, number_of_particles2) - - for grid in amr1.index.grids: - tot_parts = grid["io", "particle_position_x"].size - tot_all_parts = grid["all", "particle_position_x"].size - assert tot_parts == grid.NumberOfParticles - assert tot_all_parts == grid.NumberOfParticles - - for grid in amr2.index.grids: - tot_parts = grid["io", "particle_position_x"].size - tot_all_parts = grid["all", "particle_position_x"].size - assert tot_parts == grid.NumberOfParticles - assert tot_all_parts == grid.NumberOfParticles - - assert ( - 
amr1._get_field_info("all", "particle_position_x").sampling_type == "particle" - ) - assert ( - amr1._get_field_info("all", "particle_position_y").sampling_type == "particle" - ) - assert ( - amr1._get_field_info("all", "particle_position_z").sampling_type == "particle" - ) - assert amr1._get_field_info("all", "particle_mass").sampling_type == "particle" - assert not amr1._get_field_info("gas", "density").sampling_type == "particle" - - assert ( - amr2._get_field_info("all", "particle_position_x").sampling_type == "particle" - ) - assert ( - amr2._get_field_info("all", "particle_position_y").sampling_type == "particle" - ) - assert ( - amr2._get_field_info("all", "particle_position_z").sampling_type == "particle" - ) - assert amr2._get_field_info("all", "particle_mass").sampling_type == "particle" - assert not amr2._get_field_info("gas", "density").sampling_type == "particle" - # Now perform similar checks, but with multiple particle types num_dm_particles = 30000 @@ -256,81 +163,6 @@ def test_stream_particles(): ) assert ug4._get_field_info(ptype, "particle_mass").sampling_type == "particle" - # Now refine this - - amr3 = ug3.refine_amr(rc, fo, 3) - for field in sorted(ug3.field_list): - assert field in amr3.field_list - - grid_data = [] - - for grid in amr3.index.grids: - - data = dict( - left_edge=grid.LeftEdge, - right_edge=grid.RightEdge, - level=grid.Level, - dimensions=grid.ActiveDimensions, - ) - - for field in amr3.field_list: - if field[0] not in ("all", "nbody"): - data[field] = grid[field] - - grid_data.append(data) - - amr4 = load_amr_grids(grid_data, domain_dims) - - # Check everything again - - number_of_particles3 = [grid.NumberOfParticles for grid in amr3.index.grids] - number_of_particles4 = [grid.NumberOfParticles for grid in amr4.index.grids] - - assert_equal(np.sum(number_of_particles3), num_star_particles + num_dm_particles) - assert_equal(number_of_particles3, number_of_particles4) - - for ptype in ("dm", "star"): - assert ( - amr3._get_field_info(ptype, "particle_position_x").sampling_type - == "particle" - ) - assert ( - amr3._get_field_info(ptype, "particle_position_y").sampling_type - == "particle" - ) - assert ( - amr3._get_field_info(ptype, "particle_position_z").sampling_type - == "particle" - ) - assert amr3._get_field_info(ptype, "particle_mass").sampling_type == "particle" - assert ( - amr4._get_field_info(ptype, "particle_position_x").sampling_type - == "particle" - ) - assert ( - amr4._get_field_info(ptype, "particle_position_y").sampling_type - == "particle" - ) - assert ( - amr4._get_field_info(ptype, "particle_position_z").sampling_type - == "particle" - ) - assert amr4._get_field_info(ptype, "particle_mass").sampling_type == "particle" - - for grid in amr3.index.grids: - tot_parts = grid["dm", "particle_position_x"].size - tot_parts += grid["star", "particle_position_x"].size - tot_all_parts = grid["all", "particle_position_x"].size - assert tot_parts == grid.NumberOfParticles - assert tot_all_parts == grid.NumberOfParticles - - for grid in amr4.index.grids: - tot_parts = grid["dm", "particle_position_x"].size - tot_parts += grid["star", "particle_position_x"].size - tot_all_parts = grid["all", "particle_position_x"].size - assert tot_parts == grid.NumberOfParticles - assert tot_all_parts == grid.NumberOfParticles - def test_load_particles_types(): diff --git a/yt/utilities/tests/test_particle_generator.py b/yt/utilities/tests/test_particle_generator.py index 9ad90880d8e..fcff91d8f28 100644 --- a/yt/utilities/tests/test_particle_generator.py +++ 
b/yt/utilities/tests/test_particle_generator.py @@ -1,7 +1,5 @@ import numpy as np -import yt.utilities.flagging_methods as fm -import yt.utilities.initial_conditions as ic from yt.loaders import load_uniform_grid from yt.testing import assert_almost_equal, assert_equal from yt.units.yt_array import uconcatenate @@ -18,10 +16,7 @@ def test_particle_generator(): dens = np.zeros(domain_dims) + 0.1 temp = 4.0 * np.ones(domain_dims) fields = {"density": (dens, "code_mass/code_length**3"), "temperature": (temp, "K")} - ug = load_uniform_grid(fields, domain_dims, 1.0) - fo = [ic.BetaModelSphere(1.0, 0.1, 0.5, [0.5, 0.5, 0.5], {"density": (10.0)})] - rc = [fm.flagging_method_registry["overdensity"](4.0)] - ds = ug.refine_amr(rc, fo, 3) + ds = load_uniform_grid(fields, domain_dims, 1.0) # Now generate particles from density From 81c2cd1c4e3e2339bea3a039e5af2b50869dfd62 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 16:30:12 +0200 Subject: [PATCH 494/653] relocate stream loaders to yt.loaders --- yt/frontends/stream/api.py | 8 - yt/frontends/stream/loaders.py | 1103 ---------------- .../stream/tests/test_stream_particles.py | 2 +- yt/loaders.py | 1115 ++++++++++++++++- 4 files changed, 1108 insertions(+), 1120 deletions(-) delete mode 100644 yt/frontends/stream/loaders.py diff --git a/yt/frontends/stream/api.py b/yt/frontends/stream/api.py index 9e3b2364954..2cfea528204 100644 --- a/yt/frontends/stream/api.py +++ b/yt/frontends/stream/api.py @@ -8,11 +8,3 @@ ) from .fields import StreamFieldInfo from .io import IOHandlerStream -from .loaders import ( - load_amr_grids, - load_hexahedral_mesh, - load_octree, - load_particles, - load_uniform_grid, - load_unstructured_mesh, -) diff --git a/yt/frontends/stream/loaders.py b/yt/frontends/stream/loaders.py deleted file mode 100644 index 853c7a61d69..00000000000 --- a/yt/frontends/stream/loaders.py +++ /dev/null @@ -1,1103 +0,0 @@ -import numpy as np - -from yt.frontends.exodus_ii.util import get_num_pseudo_dims -from yt.funcs import ensure_list, issue_deprecation_warning -from yt.utilities.decompose import decompose_array, get_psize -from yt.utilities.exceptions import YTIllDefinedAMR -from yt.utilities.lib.misc_utilities import get_box_grids_level -from yt.utilities.logger import ytLogger as mylog - -from .data_structures import ( - StreamDataset, - StreamDictFieldHandler, - StreamHandler, - StreamHexahedralDataset, - StreamOctreeDataset, - StreamParticlesDataset, - StreamUnstructuredMeshDataset, -) -from .definitions import assign_particle_data, process_data, set_particle_types - - -def load_uniform_grid( - data, - domain_dimensions, - length_unit=None, - bbox=None, - nprocs=1, - sim_time=0.0, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - unit_system="cgs", -): - r"""Load a uniform grid of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - - This should allow a uniform grid of data to be loaded directly into yt and - analyzed as would any others. This comes with several caveats: - - * Units will be incorrect unless the unit system is explicitly - specified. - * Some functions may behave oddly, and parallelism will be - disappointing or non-existent in most cases. - * Particles may be difficult to integrate. - - Particle fields are detected as one-dimensional fields. - - Parameters - ---------- - data : dict - This is a dict of numpy arrays or (numpy array, unit spec) tuples. 
- The keys are the field names. - domain_dimensions : array_like - This is the domain dimensions of the grid - length_unit : string - Unit to use for lengths. Defaults to unitless. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units specified by length_unit. - Defaults to a cubic unit-length domain. - nprocs: integer, optional - If greater than 1, will create this number of subarrays out of data - sim_time : float, optional - The simulation time in seconds - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). - - Examples - -------- - - >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]]) - >>> arr = np.random.random((128, 128, 128)) - >>> data = dict(density=arr) - >>> ds = load_uniform_grid(data, arr.shape, length_unit='cm', - ... bbox=bbox, nprocs=12) - >>> dd = ds.all_data() - >>> dd['density'] - YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916, - 0.97506269, 0.03047113]) g/cm**3 - """ - - domain_dimensions = np.array(domain_dimensions) - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - # If someone included this throw it away--old API - if "number_of_particles" in data: - issue_deprecation_warning( - "It is no longer necessary to include " - "the number of particles in the data " - "dict. The number of particles is " - "determined from the sizes of the " - "particle fields." - ) - data.pop("number_of_particles") - # First we fix our field names, apply units to data - # and check for consistency of field shapes - field_units, data, number_of_particles = process_data( - data, grid_dims=tuple(domain_dimensions) - ) - - sfh = StreamDictFieldHandler() - - if number_of_particles > 0: - particle_types = set_particle_types(data) - # Used much further below. 
- pdata = {"number_of_particles": number_of_particles} - for key in list(data.keys()): - if len(data[key].shape) == 1 or key[0] == "io": - if not isinstance(key, tuple): - field = ("io", key) - mylog.debug("Reassigning '%s' to '%s'", key, field) - else: - field = key - sfh._additional_fields += (field,) - pdata[field] = data.pop(key) - else: - particle_types = {} - - if nprocs > 1: - temp = {} - new_data = {} - for key in data.keys(): - psize = get_psize(np.array(data[key].shape), nprocs) - grid_left_edges, grid_right_edges, shapes, slices = decompose_array( - data[key].shape, psize, bbox - ) - grid_dimensions = np.array([shape for shape in shapes], dtype="int32") - temp[key] = [data[key][slice] for slice in slices] - for gid in range(nprocs): - new_data[gid] = {} - for key in temp.keys(): - new_data[gid].update({key: temp[key][gid]}) - sfh.update(new_data) - del new_data, temp - else: - sfh.update({0: data}) - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # particle count - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "UniformGridData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - if np.all(domain_dimensions[1:] == 1): - dimensionality = 1 - elif domain_dimensions[2] == 1: - dimensionality = 2 - else: - dimensionality = 3 - handler.dimensionality = dimensionality - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) - - # Now figure out where the particles go - if number_of_particles > 0: - # This will update the stream handler too - assign_particle_data(sds, pdata, bbox) - - return sds - - -def load_amr_grids( - grid_data, - domain_dimensions, - bbox=None, - sim_time=0.0, - length_unit=None, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - refine_by=2, - unit_system="cgs", -): - r"""Load a set of grids of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - This should allow a sequence of grids of varying resolution of data to be - loaded directly into yt and analyzed as would any others. This comes with - several caveats: - - * Units will be incorrect unless the unit system is explicitly specified. - * Some functions may behave oddly, and parallelism will be - disappointing or non-existent in most cases. - * Particles may be difficult to integrate. - * No consistency checks are performed on the index - - Parameters - ---------- - - grid_data : list of dicts - This is a list of dicts. Each dict must have entries "left_edge", - "right_edge", "dimensions", "level", and then any remaining entries are - assumed to be fields. 
Field entries must map to an NDArray. The grid_data - may also include a particle count. If no particle count is supplied, the - dataset is understood to contain no particles. The grid_data will be - modified in place and can't be assumed to be static. - domain_dimensions : array_like - This is the domain dimensions of the grid - length_unit : string or float - Unit to use for lengths. Defaults to unitless. If set to be a string, the bbox - dimensions are assumed to be in the corresponding units. If set to a float, the - value is a assumed to be the conversion from bbox dimensions to centimeters. - mass_unit : string or float - Unit to use for masses. Defaults to unitless. - time_unit : string or float - Unit to use for times. Defaults to unitless. - velocity_unit : string or float - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string or float - Unit to use for magnetic fields. Defaults to unitless. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units specified by length_unit. - Defaults to a cubic unit-length domain. - sim_time : float, optional - The simulation time in seconds - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). - refine_by : integer or list/array of integers. - Specifies the refinement ratio between levels. Defaults to 2. This - can be an array, in which case it specifies for each dimension. For - instance, this can be used to say that some datasets have refinement of - 1 in one dimension, indicating that they span the full range in that - dimension. - - Examples - -------- - - >>> grid_data = [ - ... dict(left_edge = [0.0, 0.0, 0.0], - ... right_edge = [1.0, 1.0, 1.], - ... level = 0, - ... dimensions = [32, 32, 32], - ... number_of_particles = 0), - ... dict(left_edge = [0.25, 0.25, 0.25], - ... right_edge = [0.75, 0.75, 0.75], - ... level = 1, - ... dimensions = [32, 32, 32], - ... number_of_particles = 0) - ... ] - ... - >>> for g in grid_data: - ... g["density"] = (np.random.random(g["dimensions"])*2**g["level"], "g/cm**3") - ... 
- >>> ds = load_amr_grids(grid_data, [32, 32, 32], length_unit=1.0) - """ - - domain_dimensions = np.array(domain_dimensions) - ngrids = len(grid_data) - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros((ngrids, 1), dtype="int32") - grid_left_edges = np.zeros((ngrids, 3), dtype="float64") - grid_right_edges = np.zeros((ngrids, 3), dtype="float64") - grid_dimensions = np.zeros((ngrids, 3), dtype="int32") - number_of_particles = np.zeros((ngrids, 1), dtype="int64") - parent_ids = np.zeros(ngrids, dtype="int64") - 1 - sfh = StreamDictFieldHandler() - for i, g in enumerate(grid_data): - grid_left_edges[i, :] = g.pop("left_edge") - grid_right_edges[i, :] = g.pop("right_edge") - grid_dimensions[i, :] = g.pop("dimensions") - grid_levels[i, :] = g.pop("level") - # If someone included this throw it away--old API - if "number_of_particles" in g: - issue_deprecation_warning( - "It is no longer necessary to include " - "the number of particles in the data " - "dict. The number of particles is " - "determined from the sizes of the " - "particle fields." - ) - g.pop("number_of_particles") - field_units, data, n_particles = process_data( - g, grid_dims=tuple(grid_dimensions[i, :]) - ) - number_of_particles[i, :] = n_particles - sfh[i] = data - - # We now reconstruct our parent ids, so that our particle assignment can - # proceed. - mask = np.empty(ngrids, dtype="int32") - for gi in range(ngrids): - get_box_grids_level( - grid_left_edges[gi, :], - grid_right_edges[gi, :], - grid_levels[gi] + 1, - grid_left_edges, - grid_right_edges, - grid_levels, - mask, - ) - ids = np.where(mask.astype("bool")) - for ci in ids: - parent_ids[ci] = gi - - # Check if the grid structure is properly aligned (bug #1295) - for lvl in range(grid_levels.min() + 1, grid_levels.max() + 1): - idx = grid_levels.flatten() == lvl - dims = domain_dimensions * refine_by ** (lvl - 1) - for iax, ax in enumerate("xyz"): - cell_edges = np.linspace( - domain_left_edge[iax], domain_right_edge[iax], dims[iax], endpoint=False - ) - if set(grid_left_edges[idx, iax]) - set(cell_edges): - raise YTIllDefinedAMR(lvl, ax) - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - particle_types = {} - - for grid in sfh.values(): - particle_types.update(set_particle_types(grid)) - - handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - parent_ids, - number_of_particles, - np.zeros(ngrids).reshape((ngrids, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "AMRGridData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = refine_by - if np.all(domain_dimensions[1:] == 1): - dimensionality = 1 - elif domain_dimensions[2] == 1: - dimensionality = 2 - else: - dimensionality = 3 - handler.dimensionality = dimensionality - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) - return sds - - -def 
load_particles( - data, - length_unit=None, - bbox=None, - sim_time=None, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - unit_system="cgs", - data_source=None, -): - r"""Load a set of particles into yt as a - :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`. - - This will allow a collection of particle data to be loaded directly into - yt and analyzed as would any others. This comes with several caveats: - - * There must be sufficient space in memory to contain all the particle - data. - * Parallelism will be disappointing or non-existent in most cases. - * Fluid fields are not supported. - - Note: in order for the dataset to take advantage of SPH functionality, - the following two fields must be provided: - * ('io', 'density') - * ('io', 'smoothing_length') - - Parameters - ---------- - data : dict - This is a dict of numpy arrays or (numpy array, unit name) tuples, - where the keys are the field names. Particles positions must be named - "particle_position_x", "particle_position_y", and "particle_position_z". - length_unit : float - Conversion factor from simulation length units to centimeters - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of the length_unit - sim_time : float, optional - The simulation time in seconds - mass_unit : float - Conversion factor from simulation mass units to grams - time_unit : float - Conversion factor from simulation time units to seconds - velocity_unit : float - Conversion factor from simulation velocity units to cm/s - magnetic_unit : float - Conversion factor from simulation magnetic units to gauss - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - data_source : YTSelectionContainer, optional - If set, parameters like `bbox`, `sim_time`, and code units are derived - from it. - - Examples - -------- - - >>> pos = [np.random.random(128*128*128) for i in range(3)] - >>> data = dict(particle_position_x = pos[0], - ... particle_position_y = pos[1], - ... 
particle_position_z = pos[2]) - >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]]) - >>> ds = load_particles(data, 3.08e24, bbox=bbox) - - """ - - domain_dimensions = np.ones(3, "int32") - nprocs = 1 - - # Parse bounding box - if data_source is not None: - le, re = data_source.get_bbox() - le = le.to_value("code_length") - re = re.to_value("code_length") - bbox = list(zip(le, re)) - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - else: - bbox = np.array(bbox) - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - # Parse simulation time - if data_source is not None: - sim_time = data_source.ds.current_time - if sim_time is None: - sim_time = 0.0 - else: - sim_time = float(sim_time) - - # Parse units - def parse_unit(unit, dimension): - if unit is None: - unit = "code_" + dimension - if data_source is not None: - unit = getattr(data_source.ds, dimension + "_unit", unit) - return unit - - length_unit = parse_unit(length_unit, "length") - mass_unit = parse_unit(mass_unit, "mass") - time_unit = parse_unit(time_unit, "time") - velocity_unit = parse_unit(velocity_unit, "velocity") - magnetic_unit = parse_unit(magnetic_unit, "magnetic") - - # Preprocess data - field_units, data, _ = process_data(data) - sfh = StreamDictFieldHandler() - - pdata = {} - for key in data.keys(): - if not isinstance(key, tuple): - field = ("io", key) - mylog.debug("Reassigning '%s' to '%s'", key, field) - else: - field = key - pdata[field] = data[key] - sfh._additional_fields += (field,) - data = pdata # Drop reference count - particle_types = set_particle_types(data) - sfh.update({"stream_file": data}) - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - # I'm not sure we need any of this. - handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "ParticleData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = 3 - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system) - - return sds - - -def load_hexahedral_mesh( - data, - connectivity, - coordinates, - length_unit=None, - bbox=None, - sim_time=0.0, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - geometry="cartesian", - unit_system="cgs", -): - r"""Load a hexahedral mesh of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - - This should allow a semistructured grid of data to be loaded directly into - yt and analyzed as would any others. This comes with several caveats: - - * Units will be incorrect unless the data has already been converted to - cgs. - * Some functions may behave oddly, and parallelism will be - disappointing or non-existent in most cases. - * Particles may be difficult to integrate. 
- - Particle fields are detected as one-dimensional fields. The number of particles - is set by the "number_of_particles" key in data. - - Parameters - ---------- - data : dict - This is a dict of numpy arrays, where the keys are the field names. - There must only be one. Note that the data in the numpy arrays should - define the cell-averaged value for of the quantity in in the hexahedral - cell. - connectivity : array_like - This should be of size (N,8) where N is the number of zones. - coordinates : array_like - This should be of size (M,3) where M is the number of vertices - indicated in the connectivity matrix. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of the length unit. - sim_time : float, optional - The simulation time in seconds - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). - - """ - - domain_dimensions = np.ones(3, "int32") * 2 - nprocs = 1 - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - field_units, data, _ = process_data(data) - sfh = StreamDictFieldHandler() - - particle_types = set_particle_types(data) - - sfh.update({"connectivity": connectivity, "coordinates": coordinates, 0: data}) - # Simple check for axis length correctness - if len(data) > 0: - fn = list(sorted(data))[0] - array_values = data[fn] - if array_values.size != connectivity.shape[0]: - mylog.error( - "Dimensions of array must be one fewer than the coordinate set." - ) - raise RuntimeError - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - # I'm not sure we need any of this. 
- handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "HexahedralMeshData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = 3 - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system) - - return sds - - -def load_octree( - octree_mask, - data, - bbox=None, - sim_time=0.0, - length_unit=None, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(True, True, True), - over_refine_factor=1, - partial_coverage=1, - unit_system="cgs", -): - r"""Load an octree mask into yt. - - Octrees can be saved out by calling save_octree on an OctreeContainer. - This enables them to be loaded back in. - - This will initialize an Octree of data. Note that fluid fields will not - work yet, or possibly ever. - - Parameters - ---------- - octree_mask : np.ndarray[uint8_t] - This is a depth-first refinement mask for an Octree. It should be - of size n_octs * 8 (but see note about the root oct below), where - each item is 1 for an oct-cell being refined and 0 for it not being - refined. For over_refine_factors != 1, the children count will - still be 8, so there will stil be n_octs * 8 entries. Note that if - the root oct is not refined, there will be only one entry - for the root, so the size of the mask will be (n_octs - 1)*8 + 1. - data : dict - A dictionary of 1D arrays. Note that these must of the size of the - number of "False" values in the ``octree_mask``. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of length - sim_time : float, optional - The simulation time in seconds - length_unit : string - Unit to use for lengths. Defaults to unitless. - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - partial_coverage : boolean - Whether or not an oct can be refined cell-by-cell, or whether all - 8 get refined. - - Example - ------- - - >>> import yt - >>> import numpy as np - >>> oct_mask = [8, 0, 0, 0, 0, 8, 0, 8, - ... 0, 0, 0, 0, 0, 0, 0, 0, - ... 8, 0, 0, 0, 0, 0, 0, 0, - ... 0] - >>> - >>> octree_mask = np.array(oct_mask, dtype=np.uint8) - >>> quantities = {} - >>> quantities['gas', 'density'] = np.random.random((22, 1), dtype='f8') - >>> bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]]) - >>> - >>> ds = yt.load_octree(octree_mask=octree_mask, - ... data=quantities, - ... bbox=bbox, - ... over_refine_factor=0, - ... 
partial_coverage=0) - - """ - - if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8: - raise TypeError("octree_mask should be a Numpy array with type uint8") - - nz = 1 << (over_refine_factor) - domain_dimensions = np.array([nz, nz, nz]) - nprocs = 1 - if bbox is None: - bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - field_units, data, _ = process_data(data) - sfh = StreamDictFieldHandler() - - particle_types = set_particle_types(data) - - sfh.update({0: data}) - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - # I'm not sure we need any of this. - handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "OctreeData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = 3 - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamOctreeDataset(handler, unit_system=unit_system) - sds.octree_mask = octree_mask - sds.partial_coverage = partial_coverage - sds.over_refine_factor = over_refine_factor - - return sds - - -def load_unstructured_mesh( - connectivity, - coordinates, - node_data=None, - elem_data=None, - length_unit=None, - bbox=None, - sim_time=0.0, - mass_unit=None, - time_unit=None, - velocity_unit=None, - magnetic_unit=None, - periodicity=(False, False, False), - geometry="cartesian", - unit_system="cgs", -): - r"""Load an unstructured mesh of data into yt as a - :class:`~yt.frontends.stream.data_structures.StreamHandler`. - - This should allow an unstructured mesh data to be loaded directly into - yt and analyzed as would any others. Not all functionality for - visualization will be present, and some analysis functions may not yet have - been implemented. - - Particle fields are detected as one-dimensional fields. The number of - particles is set by the "number_of_particles" key in data. - - In the parameter descriptions below, a "vertex" is a 3D point in space, an - "element" is a single polyhedron whose location is defined by a set of - vertices, and a "mesh" is a set of polyhedral elements, each with the same - number of vertices. - - Parameters - ---------- - - connectivity : list of array_like or array_like - This should either be a single 2D array or list of 2D arrays. If this - is a list, each element in the list corresponds to the connectivity - information for a distinct mesh. Each array can have different - connectivity length and should be of shape (N,M) where N is the number - of elements and M is the number of vertices per element. 
- coordinates : array_like - The 3D coordinates of mesh vertices. This should be of size (L, D) where - L is the number of vertices and D is the number of coordinates per vertex - (the spatial dimensions of the dataset). Currently this must be either 2 or 3. - When loading more than one mesh, the data for each mesh should be concatenated - into a single coordinates array. - node_data : dict or list of dicts - For a single mesh, a dict mapping field names to 2D numpy arrays, - representing data defined at element vertices. For multiple meshes, - this must be a list of dicts. Note that these are not the values as a - function of the coordinates, but of the connectivity. Their shape - should be the same as the connectivity. This means that if the data is - in the shape of the coordinates, you may need to reshape them using the - `connectivity` array as an index. - elem_data : dict or list of dicts - For a single mesh, a dict mapping field names to 1D numpy arrays, where - each array has a length equal to the number of elements. The data - must be defined at the center of each mesh element and there must be - only one data value for each element. For multiple meshes, this must be - a list of dicts, with one dict for each mesh. - bbox : array_like (xdim:zdim, LE:RE), optional - Size of computational domain in units of the length unit. - sim_time : float, optional - The simulation time in seconds - mass_unit : string - Unit to use for masses. Defaults to unitless. - time_unit : string - Unit to use for times. Defaults to unitless. - velocity_unit : string - Unit to use for velocities. Defaults to unitless. - magnetic_unit : string - Unit to use for magnetic fields. Defaults to unitless. - periodicity : tuple of booleans - Determines whether the data will be treated as periodic along - each axis - geometry : string or tuple - "cartesian", "cylindrical", "polar", "spherical", "geographic" or - "spectral_cube". Optionally, a tuple can be provided to specify the - axis ordering -- for instance, to specify that the axis ordering should - be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same - can be done for other coordinates, for instance: - ("spherical", ("theta", "phi", "r")). - - Examples - -------- - - Load a simple mesh consisting of two tets. - - >>> # Coordinates for vertices of two tetrahedra - >>> coordinates = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5], - ... [0.5, 1, 0.5], [0.5, 0.5, 0.0], - ... [0.5, 0.5, 1.0]]) - >>> # The indices in the coordinates array of mesh vertices. - >>> # This mesh has two elements. - >>> connectivity = np.array([[0, 1, 2, 4], [0, 1, 2, 3]]) - >>> - >>> # Field data defined at the centers of the two mesh elements. - >>> elem_data = { - ... ('connect1', 'elem_field'): np.array([1, 2]) - ... } - >>> - >>> # Field data defined at node vertices - >>> node_data = { - ... ('connect1', 'node_field'): np.array([[0.0, 1.0, 2.0, 4.0], - ... [0.0, 1.0, 2.0, 3.0]]) - ... } - >>> - >>> ds = yt.load_unstructured_mesh(connectivity, coordinates, - ... elem_data=elem_data, - ... 
node_data=node_data) - """ - - dimensionality = coordinates.shape[1] - domain_dimensions = np.ones(3, "int32") * 2 - nprocs = 1 - - if elem_data is None and node_data is None: - raise RuntimeError("No data supplied in load_unstructured_mesh.") - - if isinstance(connectivity, list): - num_meshes = len(connectivity) - else: - num_meshes = 1 - connectivity = ensure_list(connectivity) - - if elem_data is None: - elem_data = [{} for i in range(num_meshes)] - elem_data = ensure_list(elem_data) - - if node_data is None: - node_data = [{} for i in range(num_meshes)] - node_data = ensure_list(node_data) - - data = [{} for i in range(num_meshes)] - for elem_dict, data_dict in zip(elem_data, data): - for field, values in elem_dict.items(): - data_dict[field] = values - for node_dict, data_dict in zip(node_data, data): - for field, values in node_dict.items(): - data_dict[field] = values - data = ensure_list(data) - - if bbox is None: - bbox = [ - [ - coordinates[:, i].min() - 0.1 * abs(coordinates[:, i].min()), - coordinates[:, i].max() + 0.1 * abs(coordinates[:, i].max()), - ] - for i in range(dimensionality) - ] - - if dimensionality < 3: - bbox.append([0.0, 1.0]) - if dimensionality < 2: - bbox.append([0.0, 1.0]) - - # handle pseudo-dims here - num_pseudo_dims = get_num_pseudo_dims(coordinates) - dimensionality -= num_pseudo_dims - for i in range(dimensionality, 3): - bbox[i][0] = 0.0 - bbox[i][1] = 1.0 - - bbox = np.array(bbox, dtype=np.float64) - domain_left_edge = np.array(bbox[:, 0], "float64") - domain_right_edge = np.array(bbox[:, 1], "float64") - grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) - - field_units = {} - particle_types = {} - sfh = StreamDictFieldHandler() - - sfh.update({"connectivity": connectivity, "coordinates": coordinates}) - for i, d in enumerate(data): - _f_unit, _data, _ = process_data(d) - field_units.update(_f_unit) - sfh[i] = _data - particle_types.update(set_particle_types(d)) - # Simple check for axis length correctness - if 0 and len(data) > 0: - fn = list(sorted(data))[0] - array_values = data[fn] - if array_values.size != connectivity.shape[0]: - mylog.error( - "Dimensions of array must be one fewer than the coordinate set." - ) - raise RuntimeError - grid_left_edges = domain_left_edge - grid_right_edges = domain_right_edge - grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") - - if length_unit is None: - length_unit = "code_length" - if mass_unit is None: - mass_unit = "code_mass" - if time_unit is None: - time_unit = "code_time" - if velocity_unit is None: - velocity_unit = "code_velocity" - if magnetic_unit is None: - magnetic_unit = "code_magnetic" - - # I'm not sure we need any of this. 
- handler = StreamHandler( - grid_left_edges, - grid_right_edges, - grid_dimensions, - grid_levels, - -np.ones(nprocs, dtype="int64"), - np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary - np.zeros(nprocs).reshape((nprocs, 1)), - sfh, - field_units, - (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), - particle_types=particle_types, - periodicity=periodicity, - ) - - handler.name = "UnstructuredMeshData" - handler.domain_left_edge = domain_left_edge - handler.domain_right_edge = domain_right_edge - handler.refine_by = 2 - handler.dimensionality = dimensionality - handler.domain_dimensions = domain_dimensions - handler.simulation_time = sim_time - handler.cosmology_simulation = 0 - - sds = StreamUnstructuredMeshDataset( - handler, geometry=geometry, unit_system=unit_system - ) - - fluid_types = ["all"] - for i in range(1, num_meshes + 1): - fluid_types += ["connect%d" % i] - sds.fluid_types = tuple(fluid_types) - - def flatten(l): - return [item for sublist in l for item in sublist] - - sds._node_fields = flatten([[f[1] for f in m] for m in node_data if m]) - sds._elem_fields = flatten([[f[1] for f in m] for m in elem_data if m]) - sds.default_field = [f for f in sds.field_list if f[0] == "connect1"][-1] - - return sds diff --git a/yt/frontends/stream/tests/test_stream_particles.py b/yt/frontends/stream/tests/test_stream_particles.py index 1835fcf8fb8..2f6a325147c 100644 --- a/yt/frontends/stream/tests/test_stream_particles.py +++ b/yt/frontends/stream/tests/test_stream_particles.py @@ -1,7 +1,7 @@ import numpy as np import yt.utilities.initial_conditions as ic -from yt.frontends.stream.api import load_particles, load_uniform_grid +from yt.loaders import load_particles, load_uniform_grid from yt.testing import assert_equal, fake_particle_ds, fake_sph_orientation_ds # Field information diff --git a/yt/loaders.py b/yt/loaders.py index 8808fef2964..f5f0885b40a 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -5,13 +5,1112 @@ # note: in the future, functions could be moved here instead # in which case, this file should be removed from flake8 ignore list in setup.cfg -from .convenience import load, load_simulation, simulation -from .frontends.stream.loaders import ( - load_amr_grids, - load_hexahedral_mesh, - load_octree, - load_particles, - load_uniform_grid, - load_unstructured_mesh, +import numpy as np + +from yt.frontends.exodus_ii.util import get_num_pseudo_dims +from yt.frontends.stream.data_structures import ( + StreamDataset, + StreamDictFieldHandler, + StreamHandler, + StreamHexahedralDataset, + StreamOctreeDataset, + StreamParticlesDataset, + StreamUnstructuredMeshDataset, ) +from yt.frontends.stream.definitions import ( + assign_particle_data, + process_data, + set_particle_types, +) +from yt.funcs import ensure_list, issue_deprecation_warning +from yt.utilities.decompose import decompose_array, get_psize +from yt.utilities.exceptions import YTIllDefinedAMR +from yt.utilities.lib.misc_utilities import get_box_grids_level +from yt.utilities.logger import ytLogger as mylog + +from .convenience import load, load_simulation, simulation from .utilities.load_sample import load_sample + + +def load_uniform_grid( + data, + domain_dimensions, + length_unit=None, + bbox=None, + nprocs=1, + sim_time=0.0, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + unit_system="cgs", +): + r"""Load a uniform grid of data into yt as a + 
:class:`~yt.frontends.stream.data_structures.StreamHandler`. + + This should allow a uniform grid of data to be loaded directly into yt and + analyzed as would any others. This comes with several caveats: + + * Units will be incorrect unless the unit system is explicitly + specified. + * Some functions may behave oddly, and parallelism will be + disappointing or non-existent in most cases. + * Particles may be difficult to integrate. + + Particle fields are detected as one-dimensional fields. + + Parameters + ---------- + data : dict + This is a dict of numpy arrays or (numpy array, unit spec) tuples. + The keys are the field names. + domain_dimensions : array_like + This is the domain dimensions of the grid + length_unit : string + Unit to use for lengths. Defaults to unitless. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units specified by length_unit. + Defaults to a cubic unit-length domain. + nprocs: integer, optional + If greater than 1, will create this number of subarrays out of data + sim_time : float, optional + The simulation time in seconds + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + + Examples + -------- + + >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]]) + >>> arr = np.random.random((128, 128, 128)) + >>> data = dict(density=arr) + >>> ds = load_uniform_grid(data, arr.shape, length_unit='cm', + ... bbox=bbox, nprocs=12) + >>> dd = ds.all_data() + >>> dd['density'] + YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916, + 0.97506269, 0.03047113]) g/cm**3 + """ + + domain_dimensions = np.array(domain_dimensions) + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + # If someone included this throw it away--old API + if "number_of_particles" in data: + issue_deprecation_warning( + "It is no longer necessary to include " + "the number of particles in the data " + "dict. The number of particles is " + "determined from the sizes of the " + "particle fields." + ) + data.pop("number_of_particles") + # First we fix our field names, apply units to data + # and check for consistency of field shapes + field_units, data, number_of_particles = process_data( + data, grid_dims=tuple(domain_dimensions) + ) + + sfh = StreamDictFieldHandler() + + if number_of_particles > 0: + particle_types = set_particle_types(data) + # Used much further below. 
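+        # Any one-dimensional field (or any field already tagged with the "io"
+        # particle type) is treated as a particle field: the loop below pops it
+        # out of `data` and collects it in `pdata`, so that it can be attached
+        # to the grids by assign_particle_data() once the dataset exists.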
+ pdata = {"number_of_particles": number_of_particles} + for key in list(data.keys()): + if len(data[key].shape) == 1 or key[0] == "io": + if not isinstance(key, tuple): + field = ("io", key) + mylog.debug("Reassigning '%s' to '%s'", key, field) + else: + field = key + sfh._additional_fields += (field,) + pdata[field] = data.pop(key) + else: + particle_types = {} + + if nprocs > 1: + temp = {} + new_data = {} + for key in data.keys(): + psize = get_psize(np.array(data[key].shape), nprocs) + grid_left_edges, grid_right_edges, shapes, slices = decompose_array( + data[key].shape, psize, bbox + ) + grid_dimensions = np.array([shape for shape in shapes], dtype="int32") + temp[key] = [data[key][slice] for slice in slices] + for gid in range(nprocs): + new_data[gid] = {} + for key in temp.keys(): + new_data[gid].update({key: temp[key][gid]}) + sfh.update(new_data) + del new_data, temp + else: + sfh.update({0: data}) + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # particle count + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "UniformGridData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + if np.all(domain_dimensions[1:] == 1): + dimensionality = 1 + elif domain_dimensions[2] == 1: + dimensionality = 2 + else: + dimensionality = 3 + handler.dimensionality = dimensionality + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) + + # Now figure out where the particles go + if number_of_particles > 0: + # This will update the stream handler too + assign_particle_data(sds, pdata, bbox) + + return sds + + +def load_amr_grids( + grid_data, + domain_dimensions, + bbox=None, + sim_time=0.0, + length_unit=None, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + refine_by=2, + unit_system="cgs", +): + r"""Load a set of grids of data into yt as a + :class:`~yt.frontends.stream.data_structures.StreamHandler`. + This should allow a sequence of grids of varying resolution of data to be + loaded directly into yt and analyzed as would any others. This comes with + several caveats: + + * Units will be incorrect unless the unit system is explicitly specified. + * Some functions may behave oddly, and parallelism will be + disappointing or non-existent in most cases. + * Particles may be difficult to integrate. + * No consistency checks are performed on the index + + Parameters + ---------- + + grid_data : list of dicts + This is a list of dicts. Each dict must have entries "left_edge", + "right_edge", "dimensions", "level", and then any remaining entries are + assumed to be fields. 
Field entries must map to an NDArray. The grid_data + may also include a particle count. If no particle count is supplied, the + dataset is understood to contain no particles. The grid_data will be + modified in place and can't be assumed to be static. + domain_dimensions : array_like + This is the domain dimensions of the grid + length_unit : string or float + Unit to use for lengths. Defaults to unitless. If set to be a string, the bbox + dimensions are assumed to be in the corresponding units. If set to a float, the + value is a assumed to be the conversion from bbox dimensions to centimeters. + mass_unit : string or float + Unit to use for masses. Defaults to unitless. + time_unit : string or float + Unit to use for times. Defaults to unitless. + velocity_unit : string or float + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string or float + Unit to use for magnetic fields. Defaults to unitless. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units specified by length_unit. + Defaults to a cubic unit-length domain. + sim_time : float, optional + The simulation time in seconds + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + refine_by : integer or list/array of integers. + Specifies the refinement ratio between levels. Defaults to 2. This + can be an array, in which case it specifies for each dimension. For + instance, this can be used to say that some datasets have refinement of + 1 in one dimension, indicating that they span the full range in that + dimension. + + Examples + -------- + + >>> grid_data = [ + ... dict(left_edge = [0.0, 0.0, 0.0], + ... right_edge = [1.0, 1.0, 1.], + ... level = 0, + ... dimensions = [32, 32, 32], + ... number_of_particles = 0), + ... dict(left_edge = [0.25, 0.25, 0.25], + ... right_edge = [0.75, 0.75, 0.75], + ... level = 1, + ... dimensions = [32, 32, 32], + ... number_of_particles = 0) + ... ] + ... + >>> for g in grid_data: + ... g["density"] = (np.random.random(g["dimensions"])*2**g["level"], "g/cm**3") + ... 
+ >>> ds = load_amr_grids(grid_data, [32, 32, 32], length_unit=1.0) + """ + + domain_dimensions = np.array(domain_dimensions) + ngrids = len(grid_data) + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros((ngrids, 1), dtype="int32") + grid_left_edges = np.zeros((ngrids, 3), dtype="float64") + grid_right_edges = np.zeros((ngrids, 3), dtype="float64") + grid_dimensions = np.zeros((ngrids, 3), dtype="int32") + number_of_particles = np.zeros((ngrids, 1), dtype="int64") + parent_ids = np.zeros(ngrids, dtype="int64") - 1 + sfh = StreamDictFieldHandler() + for i, g in enumerate(grid_data): + grid_left_edges[i, :] = g.pop("left_edge") + grid_right_edges[i, :] = g.pop("right_edge") + grid_dimensions[i, :] = g.pop("dimensions") + grid_levels[i, :] = g.pop("level") + # If someone included this throw it away--old API + if "number_of_particles" in g: + issue_deprecation_warning( + "It is no longer necessary to include " + "the number of particles in the data " + "dict. The number of particles is " + "determined from the sizes of the " + "particle fields." + ) + g.pop("number_of_particles") + field_units, data, n_particles = process_data( + g, grid_dims=tuple(grid_dimensions[i, :]) + ) + number_of_particles[i, :] = n_particles + sfh[i] = data + + # We now reconstruct our parent ids, so that our particle assignment can + # proceed. + mask = np.empty(ngrids, dtype="int32") + for gi in range(ngrids): + get_box_grids_level( + grid_left_edges[gi, :], + grid_right_edges[gi, :], + grid_levels[gi] + 1, + grid_left_edges, + grid_right_edges, + grid_levels, + mask, + ) + ids = np.where(mask.astype("bool")) + for ci in ids: + parent_ids[ci] = gi + + # Check if the grid structure is properly aligned (bug #1295) + for lvl in range(grid_levels.min() + 1, grid_levels.max() + 1): + idx = grid_levels.flatten() == lvl + dims = domain_dimensions * refine_by ** (lvl - 1) + for iax, ax in enumerate("xyz"): + cell_edges = np.linspace( + domain_left_edge[iax], domain_right_edge[iax], dims[iax], endpoint=False + ) + if set(grid_left_edges[idx, iax]) - set(cell_edges): + raise YTIllDefinedAMR(lvl, ax) + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + particle_types = {} + + for grid in sfh.values(): + particle_types.update(set_particle_types(grid)) + + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + parent_ids, + number_of_particles, + np.zeros(ngrids).reshape((ngrids, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "AMRGridData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = refine_by + if np.all(domain_dimensions[1:] == 1): + dimensionality = 1 + elif domain_dimensions[2] == 1: + dimensionality = 2 + else: + dimensionality = 3 + handler.dimensionality = dimensionality + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamDataset(handler, geometry=geometry, unit_system=unit_system) + return sds + + +def 
load_particles( + data, + length_unit=None, + bbox=None, + sim_time=None, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + unit_system="cgs", + data_source=None, +): + r"""Load a set of particles into yt as a + :class:`~yt.frontends.stream.data_structures.StreamParticleHandler`. + + This will allow a collection of particle data to be loaded directly into + yt and analyzed as would any others. This comes with several caveats: + + * There must be sufficient space in memory to contain all the particle + data. + * Parallelism will be disappointing or non-existent in most cases. + * Fluid fields are not supported. + + Note: in order for the dataset to take advantage of SPH functionality, + the following two fields must be provided: + * ('io', 'density') + * ('io', 'smoothing_length') + + Parameters + ---------- + data : dict + This is a dict of numpy arrays or (numpy array, unit name) tuples, + where the keys are the field names. Particles positions must be named + "particle_position_x", "particle_position_y", and "particle_position_z". + length_unit : float + Conversion factor from simulation length units to centimeters + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of the length_unit + sim_time : float, optional + The simulation time in seconds + mass_unit : float + Conversion factor from simulation mass units to grams + time_unit : float + Conversion factor from simulation time units to seconds + velocity_unit : float + Conversion factor from simulation velocity units to cm/s + magnetic_unit : float + Conversion factor from simulation magnetic units to gauss + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + data_source : YTSelectionContainer, optional + If set, parameters like `bbox`, `sim_time`, and code units are derived + from it. + + Examples + -------- + + >>> pos = [np.random.random(128*128*128) for i in range(3)] + >>> data = dict(particle_position_x = pos[0], + ... particle_position_y = pos[1], + ... 
particle_position_z = pos[2]) + >>> bbox = np.array([[0., 1.0], [0.0, 1.0], [0.0, 1.0]]) + >>> ds = load_particles(data, 3.08e24, bbox=bbox) + + """ + + domain_dimensions = np.ones(3, "int32") + nprocs = 1 + + # Parse bounding box + if data_source is not None: + le, re = data_source.get_bbox() + le = le.to_value("code_length") + re = re.to_value("code_length") + bbox = list(zip(le, re)) + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + else: + bbox = np.array(bbox) + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + # Parse simulation time + if data_source is not None: + sim_time = data_source.ds.current_time + if sim_time is None: + sim_time = 0.0 + else: + sim_time = float(sim_time) + + # Parse units + def parse_unit(unit, dimension): + if unit is None: + unit = "code_" + dimension + if data_source is not None: + unit = getattr(data_source.ds, dimension + "_unit", unit) + return unit + + length_unit = parse_unit(length_unit, "length") + mass_unit = parse_unit(mass_unit, "mass") + time_unit = parse_unit(time_unit, "time") + velocity_unit = parse_unit(velocity_unit, "velocity") + magnetic_unit = parse_unit(magnetic_unit, "magnetic") + + # Preprocess data + field_units, data, _ = process_data(data) + sfh = StreamDictFieldHandler() + + pdata = {} + for key in data.keys(): + if not isinstance(key, tuple): + field = ("io", key) + mylog.debug("Reassigning '%s' to '%s'", key, field) + else: + field = key + pdata[field] = data[key] + sfh._additional_fields += (field,) + data = pdata # Drop reference count + particle_types = set_particle_types(data) + sfh.update({"stream_file": data}) + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + # I'm not sure we need any of this. + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "ParticleData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = 3 + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamParticlesDataset(handler, geometry=geometry, unit_system=unit_system) + + return sds + + +def load_hexahedral_mesh( + data, + connectivity, + coordinates, + length_unit=None, + bbox=None, + sim_time=0.0, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + geometry="cartesian", + unit_system="cgs", +): + r"""Load a hexahedral mesh of data into yt as a + :class:`~yt.frontends.stream.data_structures.StreamHandler`. + + This should allow a semistructured grid of data to be loaded directly into + yt and analyzed as would any others. This comes with several caveats: + + * Units will be incorrect unless the data has already been converted to + cgs. + * Some functions may behave oddly, and parallelism will be + disappointing or non-existent in most cases. + * Particles may be difficult to integrate. 
+ + Particle fields are detected as one-dimensional fields. The number of particles + is set by the "number_of_particles" key in data. + + Parameters + ---------- + data : dict + This is a dict of numpy arrays, where the keys are the field names. + There must only be one. Note that the data in the numpy arrays should + define the cell-averaged value for of the quantity in in the hexahedral + cell. + connectivity : array_like + This should be of size (N,8) where N is the number of zones. + coordinates : array_like + This should be of size (M,3) where M is the number of vertices + indicated in the connectivity matrix. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of the length unit. + sim_time : float, optional + The simulation time in seconds + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + + """ + + domain_dimensions = np.ones(3, "int32") * 2 + nprocs = 1 + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + field_units, data, _ = process_data(data) + sfh = StreamDictFieldHandler() + + particle_types = set_particle_types(data) + + sfh.update({"connectivity": connectivity, "coordinates": coordinates, 0: data}) + # Simple check for axis length correctness + if len(data) > 0: + fn = list(sorted(data))[0] + array_values = data[fn] + if array_values.size != connectivity.shape[0]: + mylog.error( + "Dimensions of array must be one fewer than the coordinate set." + ) + raise RuntimeError + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + # I'm not sure we need any of this. 
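+    # The hexahedral mesh itself is not described by the pseudo-grid set up
+    # below; its geometry travels through the stream field handler via the
+    # "connectivity" and "coordinates" entries added above.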
+ handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "HexahedralMeshData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = 3 + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamHexahedralDataset(handler, geometry=geometry, unit_system=unit_system) + + return sds + + +def load_octree( + octree_mask, + data, + bbox=None, + sim_time=0.0, + length_unit=None, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(True, True, True), + over_refine_factor=1, + partial_coverage=1, + unit_system="cgs", +): + r"""Load an octree mask into yt. + + Octrees can be saved out by calling save_octree on an OctreeContainer. + This enables them to be loaded back in. + + This will initialize an Octree of data. Note that fluid fields will not + work yet, or possibly ever. + + Parameters + ---------- + octree_mask : np.ndarray[uint8_t] + This is a depth-first refinement mask for an Octree. It should be + of size n_octs * 8 (but see note about the root oct below), where + each item is 1 for an oct-cell being refined and 0 for it not being + refined. For over_refine_factors != 1, the children count will + still be 8, so there will stil be n_octs * 8 entries. Note that if + the root oct is not refined, there will be only one entry + for the root, so the size of the mask will be (n_octs - 1)*8 + 1. + data : dict + A dictionary of 1D arrays. Note that these must of the size of the + number of "False" values in the ``octree_mask``. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of length + sim_time : float, optional + The simulation time in seconds + length_unit : string + Unit to use for lengths. Defaults to unitless. + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + partial_coverage : boolean + Whether or not an oct can be refined cell-by-cell, or whether all + 8 get refined. + + Example + ------- + + >>> import yt + >>> import numpy as np + >>> oct_mask = [8, 0, 0, 0, 0, 8, 0, 8, + ... 0, 0, 0, 0, 0, 0, 0, 0, + ... 8, 0, 0, 0, 0, 0, 0, 0, + ... 0] + >>> + >>> octree_mask = np.array(oct_mask, dtype=np.uint8) + >>> quantities = {} + >>> quantities['gas', 'density'] = np.random.random((22, 1), dtype='f8') + >>> bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]]) + >>> + >>> ds = yt.load_octree(octree_mask=octree_mask, + ... data=quantities, + ... bbox=bbox, + ... over_refine_factor=0, + ... 
partial_coverage=0) + + """ + + if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8: + raise TypeError("octree_mask should be a Numpy array with type uint8") + + nz = 1 << (over_refine_factor) + domain_dimensions = np.array([nz, nz, nz]) + nprocs = 1 + if bbox is None: + bbox = np.array([[0.0, 1.0], [0.0, 1.0], [0.0, 1.0]], "float64") + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + field_units, data, _ = process_data(data) + sfh = StreamDictFieldHandler() + + particle_types = set_particle_types(data) + + sfh.update({0: data}) + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + # I'm not sure we need any of this. + handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "OctreeData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = 3 + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamOctreeDataset(handler, unit_system=unit_system) + sds.octree_mask = octree_mask + sds.partial_coverage = partial_coverage + sds.over_refine_factor = over_refine_factor + + return sds + + +def load_unstructured_mesh( + connectivity, + coordinates, + node_data=None, + elem_data=None, + length_unit=None, + bbox=None, + sim_time=0.0, + mass_unit=None, + time_unit=None, + velocity_unit=None, + magnetic_unit=None, + periodicity=(False, False, False), + geometry="cartesian", + unit_system="cgs", +): + r"""Load an unstructured mesh of data into yt as a + :class:`~yt.frontends.stream.data_structures.StreamHandler`. + + This should allow an unstructured mesh data to be loaded directly into + yt and analyzed as would any others. Not all functionality for + visualization will be present, and some analysis functions may not yet have + been implemented. + + Particle fields are detected as one-dimensional fields. The number of + particles is set by the "number_of_particles" key in data. + + In the parameter descriptions below, a "vertex" is a 3D point in space, an + "element" is a single polyhedron whose location is defined by a set of + vertices, and a "mesh" is a set of polyhedral elements, each with the same + number of vertices. + + Parameters + ---------- + + connectivity : list of array_like or array_like + This should either be a single 2D array or list of 2D arrays. If this + is a list, each element in the list corresponds to the connectivity + information for a distinct mesh. Each array can have different + connectivity length and should be of shape (N,M) where N is the number + of elements and M is the number of vertices per element. 
+ coordinates : array_like + The 3D coordinates of mesh vertices. This should be of size (L, D) where + L is the number of vertices and D is the number of coordinates per vertex + (the spatial dimensions of the dataset). Currently this must be either 2 or 3. + When loading more than one mesh, the data for each mesh should be concatenated + into a single coordinates array. + node_data : dict or list of dicts + For a single mesh, a dict mapping field names to 2D numpy arrays, + representing data defined at element vertices. For multiple meshes, + this must be a list of dicts. Note that these are not the values as a + function of the coordinates, but of the connectivity. Their shape + should be the same as the connectivity. This means that if the data is + in the shape of the coordinates, you may need to reshape them using the + `connectivity` array as an index. + elem_data : dict or list of dicts + For a single mesh, a dict mapping field names to 1D numpy arrays, where + each array has a length equal to the number of elements. The data + must be defined at the center of each mesh element and there must be + only one data value for each element. For multiple meshes, this must be + a list of dicts, with one dict for each mesh. + bbox : array_like (xdim:zdim, LE:RE), optional + Size of computational domain in units of the length unit. + sim_time : float, optional + The simulation time in seconds + mass_unit : string + Unit to use for masses. Defaults to unitless. + time_unit : string + Unit to use for times. Defaults to unitless. + velocity_unit : string + Unit to use for velocities. Defaults to unitless. + magnetic_unit : string + Unit to use for magnetic fields. Defaults to unitless. + periodicity : tuple of booleans + Determines whether the data will be treated as periodic along + each axis + geometry : string or tuple + "cartesian", "cylindrical", "polar", "spherical", "geographic" or + "spectral_cube". Optionally, a tuple can be provided to specify the + axis ordering -- for instance, to specify that the axis ordering should + be z, x, y, this would be: ("cartesian", ("z", "x", "y")). The same + can be done for other coordinates, for instance: + ("spherical", ("theta", "phi", "r")). + + Examples + -------- + + Load a simple mesh consisting of two tets. + + >>> # Coordinates for vertices of two tetrahedra + >>> coordinates = np.array([[0.0, 0.0, 0.5], [0.0, 1.0, 0.5], + ... [0.5, 1, 0.5], [0.5, 0.5, 0.0], + ... [0.5, 0.5, 1.0]]) + >>> # The indices in the coordinates array of mesh vertices. + >>> # This mesh has two elements. + >>> connectivity = np.array([[0, 1, 2, 4], [0, 1, 2, 3]]) + >>> + >>> # Field data defined at the centers of the two mesh elements. + >>> elem_data = { + ... ('connect1', 'elem_field'): np.array([1, 2]) + ... } + >>> + >>> # Field data defined at node vertices + >>> node_data = { + ... ('connect1', 'node_field'): np.array([[0.0, 1.0, 2.0, 4.0], + ... [0.0, 1.0, 2.0, 3.0]]) + ... } + >>> + >>> ds = yt.load_unstructured_mesh(connectivity, coordinates, + ... elem_data=elem_data, + ... 
node_data=node_data) + """ + + dimensionality = coordinates.shape[1] + domain_dimensions = np.ones(3, "int32") * 2 + nprocs = 1 + + if elem_data is None and node_data is None: + raise RuntimeError("No data supplied in load_unstructured_mesh.") + + if isinstance(connectivity, list): + num_meshes = len(connectivity) + else: + num_meshes = 1 + connectivity = ensure_list(connectivity) + + if elem_data is None: + elem_data = [{} for i in range(num_meshes)] + elem_data = ensure_list(elem_data) + + if node_data is None: + node_data = [{} for i in range(num_meshes)] + node_data = ensure_list(node_data) + + data = [{} for i in range(num_meshes)] + for elem_dict, data_dict in zip(elem_data, data): + for field, values in elem_dict.items(): + data_dict[field] = values + for node_dict, data_dict in zip(node_data, data): + for field, values in node_dict.items(): + data_dict[field] = values + data = ensure_list(data) + + if bbox is None: + bbox = [ + [ + coordinates[:, i].min() - 0.1 * abs(coordinates[:, i].min()), + coordinates[:, i].max() + 0.1 * abs(coordinates[:, i].max()), + ] + for i in range(dimensionality) + ] + + if dimensionality < 3: + bbox.append([0.0, 1.0]) + if dimensionality < 2: + bbox.append([0.0, 1.0]) + + # handle pseudo-dims here + num_pseudo_dims = get_num_pseudo_dims(coordinates) + dimensionality -= num_pseudo_dims + for i in range(dimensionality, 3): + bbox[i][0] = 0.0 + bbox[i][1] = 1.0 + + bbox = np.array(bbox, dtype=np.float64) + domain_left_edge = np.array(bbox[:, 0], "float64") + domain_right_edge = np.array(bbox[:, 1], "float64") + grid_levels = np.zeros(nprocs, dtype="int32").reshape((nprocs, 1)) + + field_units = {} + particle_types = {} + sfh = StreamDictFieldHandler() + + sfh.update({"connectivity": connectivity, "coordinates": coordinates}) + for i, d in enumerate(data): + _f_unit, _data, _ = process_data(d) + field_units.update(_f_unit) + sfh[i] = _data + particle_types.update(set_particle_types(d)) + # Simple check for axis length correctness + if 0 and len(data) > 0: + fn = list(sorted(data))[0] + array_values = data[fn] + if array_values.size != connectivity.shape[0]: + mylog.error( + "Dimensions of array must be one fewer than the coordinate set." + ) + raise RuntimeError + grid_left_edges = domain_left_edge + grid_right_edges = domain_right_edge + grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") + + if length_unit is None: + length_unit = "code_length" + if mass_unit is None: + mass_unit = "code_mass" + if time_unit is None: + time_unit = "code_time" + if velocity_unit is None: + velocity_unit = "code_velocity" + if magnetic_unit is None: + magnetic_unit = "code_magnetic" + + # I'm not sure we need any of this. 
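+    # Likewise for unstructured meshes: the geometry is carried by the
+    # "connectivity" and "coordinates" entries of the field handler, with one
+    # field dict per mesh (keyed by mesh index), while the StreamHandler below
+    # only provides a single bounding pseudo-grid.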
+ handler = StreamHandler( + grid_left_edges, + grid_right_edges, + grid_dimensions, + grid_levels, + -np.ones(nprocs, dtype="int64"), + np.zeros(nprocs, dtype="int64").reshape(nprocs, 1), # Temporary + np.zeros(nprocs).reshape((nprocs, 1)), + sfh, + field_units, + (length_unit, mass_unit, time_unit, velocity_unit, magnetic_unit), + particle_types=particle_types, + periodicity=periodicity, + ) + + handler.name = "UnstructuredMeshData" + handler.domain_left_edge = domain_left_edge + handler.domain_right_edge = domain_right_edge + handler.refine_by = 2 + handler.dimensionality = dimensionality + handler.domain_dimensions = domain_dimensions + handler.simulation_time = sim_time + handler.cosmology_simulation = 0 + + sds = StreamUnstructuredMeshDataset( + handler, geometry=geometry, unit_system=unit_system + ) + + fluid_types = ["all"] + for i in range(1, num_meshes + 1): + fluid_types += ["connect%d" % i] + sds.fluid_types = tuple(fluid_types) + + def flatten(l): + return [item for sublist in l for item in sublist] + + sds._node_fields = flatten([[f[1] for f in m] for m in node_data if m]) + sds._elem_fields = flatten([[f[1] for f in m] for m in elem_data if m]) + sds.default_field = [f for f in sds.field_list if f[0] == "connect1"][-1] + + return sds From 0ec474d58cad3ae850e104e7c651d9594612a005 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 16:40:19 +0200 Subject: [PATCH 495/653] relocate convenience loaders to yt.loaders --- setup.cfg | 2 +- yt/convenience.py | 145 ++---------------------- yt/data_objects/time_series.py | 2 + yt/loaders.py | 201 ++++++++++++++++++++++++++++++--- yt/utilities/load_sample.py | 3 +- 5 files changed, 196 insertions(+), 157 deletions(-) diff --git a/setup.cfg b/setup.cfg index 8ae6f8cc87d..73bd49bc63a 100644 --- a/setup.cfg +++ b/setup.cfg @@ -14,7 +14,7 @@ max-line-length=88 exclude = doc, benchmarks, */api.py, # avoid spurious "unused import" - yt/loaders.py, # avoid spurious "unused import" + yt/convenience.py, # avoid spurious "unused import" */__init__.py, # avoid spurious "unused import" */__config__.py, # autogenerated yt/extern, # vendored libraries diff --git a/yt/convenience.py b/yt/convenience.py index 3c68fcf9a2f..c887d3390f9 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -1,137 +1,8 @@ -import os - -# Named imports -from yt.config import ytcfg -from yt.utilities.exceptions import ( - YTAmbiguousDataType, - YTOutputNotIdentified, - YTSimulationNotIdentified, -) -from yt.utilities.hierarchy_inspection import find_lowest_subclasses -from yt.utilities.object_registries import ( - output_type_registry, - simulation_time_series_registry, -) - - -def load(fn, *args, **kwargs): - """ - Load a Dataset or DatasetSeries object. - The data format is automatically discovered, and the exact return type is the - corresponding subclass of :class:`yt.data_objects.static_output.Dataset`. - A :class:`yt.data_objects.time_series.DatasetSeries` is created if the first - argument is a pattern. - - Parameters - ---------- - fn : str, os.Pathlike, or byte (types supported by os.path.expandusers) - A path to the data location. This can be a file name, directory name, a glob - pattern, or a url (for data types that support it). - - Additional arguments, if any, are passed down to the return class. - - Returns - ------- - :class:`yt.data_objects.static_output.Dataset` object - If fn is a single path, create a Dataset from the appropriate subclass. 
- - :class:`yt.data_objects.time_series.DatasetSeries` - If fn is a glob pattern (i.e. containing wildcards '[]?!*'), create a series. - - Raises - ------ - FileNotFoundError - If fn does not match any existing file or directory. - - yt.utilities.exceptions.YTOutputNotIdentified - If fn matches existing files or directories with undetermined format. - - yt.utilities.exceptions.YTAmbiguousDataType - If the data format matches more than one class of similar specilization levels. - """ - fn = os.path.expanduser(fn) - - if any(wildcard in fn for wildcard in "[]?!*"): - from yt.data_objects.time_series import DatasetSeries - - return DatasetSeries(fn, *args, **kwargs) - - # Unless the dataset starts with http, - # look for it using the path or relative to the data dir (in this order). - if not (os.path.exists(fn) or fn.startswith("http")): - data_dir = ytcfg.get("yt", "test_data_dir") - alt_fn = os.path.join(data_dir, fn) - if os.path.exists(alt_fn): - fn = alt_fn - else: - msg = f"No such file or directory: '{fn}'." - if os.path.exists(data_dir): - msg += f"\n(Also tried '{alt_fn}')." - raise FileNotFoundError(msg) - - candidates = [] - for cls in output_type_registry.values(): - if cls._is_valid(fn, *args, **kwargs): - candidates.append(cls) - - # Find only the lowest subclasses, i.e. most specialised front ends - candidates = find_lowest_subclasses(candidates) - - if len(candidates) == 1: - return candidates[0](fn, *args, **kwargs) - - if len(candidates) > 1: - raise YTAmbiguousDataType(fn, candidates) - - raise YTOutputNotIdentified(fn, args, kwargs) - - -def load_simulation(fn, simulation_type, find_outputs=False): - """ - Load a simulation time series object of the specified simulation type. - - Parameters - ---------- - fn : str, os.Pathlike, or byte (types supported by os.path.expandusers) - Name of the data file or directory. - - simulation_type : str - E.g. 'Enzo' - - find_outputs : bool - Defaults to False - - Raises - ------ - FileNotFoundError - If fn is not found. - - yt.utilities.exceptions.YTSimulationNotIdentified - If simulation_type is unknown. - """ - - if not os.path.exists(fn): - alt_fn = os.path.join(ytcfg.get("yt", "test_data_dir"), fn) - if os.path.exists(alt_fn): - fn = alt_fn - else: - raise FileNotFoundError(f"No such file or directory: '{fn}'") - - try: - cls = simulation_time_series_registry[simulation_type] - except KeyError: - raise YTSimulationNotIdentified(simulation_type) - - return cls(fn, find_outputs=find_outputs) - - -def simulation(fn, simulation_type, find_outputs=False): - from yt.funcs import issue_deprecation_warning - - issue_deprecation_warning( - "yt.simulation is a deprecated alias for yt.load_simulation" - "and will be removed in a future version of yt." 
- ) - return load_simulation( - fn=fn, simulation_type=simulation_type, find_outputs=find_outputs - ) +# this is a deprecated module +from .funcs import issue_deprecation_warning +from .loaders import load, load_simulation, simulation + +issue_deprecation_warning( + "importing from yt.convenience is deprecated in favor of yt.loaders\n" + "This will become an error in yt 4.1" +) \ No newline at end of file diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 95a045c5877..0c0635ad65f 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -425,6 +425,8 @@ def from_output_log(cls, output_log, line_prefix="DATASET WRITTEN", parallel=Tru _dataset_cls = None def _load(self, output_fn, **kwargs): + from yt.loaders import load + if self._dataset_cls is not None: return self._dataset_cls(output_fn, **kwargs) elif self._mixed_dataset_types: diff --git a/yt/loaders.py b/yt/loaders.py index f5f0885b40a..c1899a34d88 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -5,31 +5,155 @@ # note: in the future, functions could be moved here instead # in which case, this file should be removed from flake8 ignore list in setup.cfg +import os + import numpy as np -from yt.frontends.exodus_ii.util import get_num_pseudo_dims -from yt.frontends.stream.data_structures import ( - StreamDataset, - StreamDictFieldHandler, - StreamHandler, - StreamHexahedralDataset, - StreamOctreeDataset, - StreamParticlesDataset, - StreamUnstructuredMeshDataset, -) -from yt.frontends.stream.definitions import ( - assign_particle_data, - process_data, - set_particle_types, -) +from yt.config import ytcfg from yt.funcs import ensure_list, issue_deprecation_warning from yt.utilities.decompose import decompose_array, get_psize -from yt.utilities.exceptions import YTIllDefinedAMR +from yt.utilities.exceptions import ( + YTAmbiguousDataType, + YTIllDefinedAMR, + YTOutputNotIdentified, + YTSimulationNotIdentified, +) +from yt.utilities.hierarchy_inspection import find_lowest_subclasses from yt.utilities.lib.misc_utilities import get_box_grids_level from yt.utilities.logger import ytLogger as mylog +from yt.utilities.parameter_file_storage import ( + output_type_registry, + simulation_time_series_registry, +) + +from .utilities.load_sample import load_sample # noqa F401 + +# --- Loaders for known data formats --- + + +def load(fn, *args, **kwargs): + """ + Load a Dataset or DatasetSeries object. + The data format is automatically discovered, and the exact return type is the + corresponding subclass of :class:`yt.data_objects.static_output.Dataset`. + A :class:`yt.data_objects.time_series.DatasetSeries` is created if the first + argument is a pattern. + + Parameters + ---------- + fn : str, os.Pathlike, or byte (types supported by os.path.expandusers) + A path to the data location. This can be a file name, directory name, a glob + pattern, or a url (for data types that support it). + + Additional arguments, if any, are passed down to the return class. + + Returns + ------- + :class:`yt.data_objects.static_output.Dataset` object + If fn is a single path, create a Dataset from the appropriate subclass. + + :class:`yt.data_objects.time_series.DatasetSeries` + If fn is a glob pattern (i.e. containing wildcards '[]?!*'), create a series. + + Raises + ------ + FileNotFoundError + If fn does not match any existing file or directory. + + yt.utilities.exceptions.YTOutputNotIdentified + If fn matches existing files or directories with undetermined format. 
+ + yt.utilities.exceptions.YTAmbiguousDataType + If the data format matches more than one class of similar specilization levels. + """ + fn = os.path.expanduser(fn) + + if any(wildcard in fn for wildcard in "[]?!*"): + from yt.data_objects.time_series import DatasetSeries + + return DatasetSeries(fn, *args, **kwargs) + + # Unless the dataset starts with http, look for it using the path or relative to the data dir (in this order). + if not (os.path.exists(fn) or fn.startswith("http")): + data_dir = ytcfg.get("yt", "test_data_dir") + alt_fn = os.path.join(data_dir, fn) + if os.path.exists(alt_fn): + fn = alt_fn + else: + msg = f"No such file or directory: '{fn}'." + if os.path.exists(data_dir): + msg += f"\n(Also tried '{alt_fn}')." + raise FileNotFoundError(msg) + + candidates = [] + for cls in output_type_registry.values(): + if cls._is_valid(fn, *args, **kwargs): + candidates.append(cls) + + # Find only the lowest subclasses, i.e. most specialised front ends + candidates = find_lowest_subclasses(candidates) + + if len(candidates) == 1: + return candidates[0](fn, *args, **kwargs) + + if len(candidates) > 1: + raise YTAmbiguousDataType(fn, candidates) -from .convenience import load, load_simulation, simulation -from .utilities.load_sample import load_sample + raise YTOutputNotIdentified(fn, args, kwargs) + + +def load_simulation(fn, simulation_type, find_outputs=False): + """ + Load a simulation time series object of the specified simulation type. + + Parameters + ---------- + fn : str, os.Pathlike, or byte (types supported by os.path.expandusers) + Name of the data file or directory. + + simulation_type : str + E.g. 'Enzo' + + find_outputs : bool + Defaults to False + + Raises + ------ + FileNotFoundError + If fn is not found. + + yt.utilities.exceptions.YTSimulationNotIdentified + If simulation_type is unknown. + """ + + if not os.path.exists(fn): + alt_fn = os.path.join(ytcfg.get("yt", "test_data_dir"), fn) + if os.path.exists(alt_fn): + fn = alt_fn + else: + raise FileNotFoundError(f"No such file or directory: '{fn}'") + + try: + cls = simulation_time_series_registry[simulation_type] + except KeyError: + raise YTSimulationNotIdentified(simulation_type) + + return cls(fn, find_outputs=find_outputs) + + +def simulation(fn, simulation_type, find_outputs=False): + from yt.funcs import issue_deprecation_warning + + issue_deprecation_warning( + "yt.simulation is a deprecated alias for yt.load_simulation" + "and will be removed in a future version of yt." + ) + return load_simulation( + fn=fn, simulation_type=simulation_type, find_outputs=find_outputs + ) + + +# --- Loaders for generic ("stream") data --- def load_uniform_grid( @@ -109,6 +233,16 @@ def load_uniform_grid( YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916, 0.97506269, 0.03047113]) g/cm**3 """ + from yt.frontends.stream.data_structures import ( + StreamDataset, + StreamDictFieldHandler, + StreamHandler, + ) + from yt.frontends.stream.definitions import ( + assign_particle_data, + process_data, + set_particle_types, + ) domain_dimensions = np.array(domain_dimensions) if bbox is None: @@ -317,6 +451,12 @@ def load_amr_grids( ... 
>>> ds = load_amr_grids(grid_data, [32, 32, 32], length_unit=1.0) """ + from yt.frontends.stream.data_structures import ( + StreamDataset, + StreamDictFieldHandler, + StreamHandler, + ) + from yt.frontends.stream.definitions import process_data, set_particle_types domain_dimensions = np.array(domain_dimensions) ngrids = len(grid_data) @@ -498,6 +638,12 @@ def load_particles( >>> ds = load_particles(data, 3.08e24, bbox=bbox) """ + from yt.frontends.stream.data_structures import ( + StreamDictFieldHandler, + StreamHandler, + StreamParticlesDataset, + ) + from yt.frontends.stream.definitions import process_data, set_particle_types domain_dimensions = np.ones(3, "int32") nprocs = 1 @@ -654,6 +800,12 @@ def load_hexahedral_mesh( ("spherical", ("theta", "phi", "r")). """ + from yt.frontends.stream.data_structures import ( + StreamDictFieldHandler, + StreamHandler, + StreamHexahedralDataset, + ) + from yt.frontends.stream.definitions import process_data, set_particle_types domain_dimensions = np.ones(3, "int32") * 2 nprocs = 1 @@ -802,6 +954,12 @@ def load_octree( ... partial_coverage=0) """ + from yt.frontends.stream.data_structures import ( + StreamDictFieldHandler, + StreamHandler, + StreamOctreeDataset, + ) + from yt.frontends.stream.definitions import process_data, set_particle_types if not isinstance(octree_mask, np.ndarray) or octree_mask.dtype != np.uint8: raise TypeError("octree_mask should be a Numpy array with type uint8") @@ -981,6 +1139,13 @@ def load_unstructured_mesh( ... elem_data=elem_data, ... node_data=node_data) """ + from yt.frontends.exodus_ii.util import get_num_pseudo_dims + from yt.frontends.stream.data_structures import ( + StreamDictFieldHandler, + StreamHandler, + StreamUnstructuredMeshDataset, + ) + from yt.frontends.stream.definitions import process_data, set_particle_types dimensionality = coordinates.shape[1] domain_dimensions = np.ones(3, "int32") * 2 diff --git a/yt/utilities/load_sample.py b/yt/utilities/load_sample.py index 4458a21a6be..e2c2222ef06 100644 --- a/yt/utilities/load_sample.py +++ b/yt/utilities/load_sample.py @@ -9,7 +9,6 @@ import yt.utilities.sample_data as sd from yt.funcs import mylog -from yt.loaders import load from yt.utilities.on_demand_imports import _pooch as pch @@ -38,6 +37,8 @@ def load_sample(name=None, specific_file=None, pbar=True): display a progress bar """ + from yt.loaders import load + fido = sd.Fido() if name is None: keys = [] From 853621cc5a1d74e9eba532e1962376cd6a921b32 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 16:44:15 +0200 Subject: [PATCH 496/653] refactor: avoid redundancy in helper functions to load_sample --- yt/utilities/load_sample.py | 22 ++++------------------ 1 file changed, 4 insertions(+), 18 deletions(-) diff --git a/yt/utilities/load_sample.py b/yt/utilities/load_sample.py index e2c2222ef06..bc310a53475 100644 --- a/yt/utilities/load_sample.py +++ b/yt/utilities/load_sample.py @@ -62,11 +62,13 @@ def load_sample(name=None, specific_file=None, pbar=True): mylog.warning("tqdm is not installed, progress bar can not be displayed.") if extension == "h5": - fname = fetch_noncompressed_file(fileext, fido, downloader=downloader) + processor = pch.pooch.Untar() else: # we are going to assume most files that exist on the hub are # compressed in .tar folders. Some may not. 
- fname = fetch_compressed_file(fileext, fido, downloader=downloader) + processor = None + + fname = fido.fido.fetch(name, processor=processor, downloader=downloader) # The `folder_path` variable is used here to notify the user where the # files have been unpacked to. However, we can't assume this is reliable @@ -141,19 +143,3 @@ def _validate_sampledata_name(name): fileext = name basename = base return fileext, basename, extension - - -def fetch_compressed_file(name, fido, downloader=None): - """ - Load a large compressed file from the data registry - """ - fname = fido.fido.fetch(name, processor=pch.pooch.Untar(), downloader=downloader) - return fname - - -def fetch_noncompressed_file(name, fido, downloader=None): - """ - Load an uncompressed file from the data registry - """ - fname = fido.fido.fetch(name, downloader=downloader) - return fname From 44127dfe2dfcadf1bbb63abc35b7fb12af7e9610 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 17:12:54 +0200 Subject: [PATCH 497/653] relocate load_sample to yt.loaders --- yt/loaders.py | 144 +++++++++++++++++++++++- yt/utilities/load_sample.py | 145 ------------------------- yt/utilities/tests/test_load_sample.py | 2 +- 3 files changed, 141 insertions(+), 150 deletions(-) delete mode 100644 yt/utilities/load_sample.py diff --git a/yt/loaders.py b/yt/loaders.py index c1899a34d88..3b054da90f7 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -9,8 +9,9 @@ import numpy as np +import yt.utilities.sample_data as sd from yt.config import ytcfg -from yt.funcs import ensure_list, issue_deprecation_warning +from yt.funcs import ensure_list, issue_deprecation_warning, mylog from yt.utilities.decompose import decompose_array, get_psize from yt.utilities.exceptions import ( YTAmbiguousDataType, @@ -20,14 +21,12 @@ ) from yt.utilities.hierarchy_inspection import find_lowest_subclasses from yt.utilities.lib.misc_utilities import get_box_grids_level -from yt.utilities.logger import ytLogger as mylog +from yt.utilities.on_demand_imports import _pooch as pch from yt.utilities.parameter_file_storage import ( output_type_registry, simulation_time_series_registry, ) -from .utilities.load_sample import load_sample # noqa F401 - # --- Loaders for known data formats --- @@ -1279,3 +1278,140 @@ def flatten(l): sds.default_field = [f for f in sds.field_list if f[0] == "connect1"][-1] return sds + + +# --- Loader for yt sample datasets --- +# This utility will check to see if sample data exists on disc. +# If not, it will download it. + + +def load_sample(name=None, specific_file=None, pbar=True): + """ + Load sample data with yt. Simple wrapper around yt.load to include fetching + data with pooch. + + Parameters + ---------- + name : str or None + The name of the sample data to load. This is generally the name of the + folder of the dataset. For IsolatedGalaxy, the name would be + `IsolatedGalaxy`. If `None` is supplied, the return value + will be a list of all known datasets (by name). + + specific_file : str, optional + optional argument -- the name of the file to load that is located + within sample dataset of `name`. For the dataset `enzo_cosmology_plus`, + which has a number of timesteps available, one may wish to choose + DD0003. 
The file specifically would be + `enzo_cosmology_plus/DD0003/DD0003`, and the argument passed to this + variable would be `DD0003/DD0003` + + pbar: bool + display a progress bar + + """ + + fido = sd.Fido() + if name is None: + keys = [] + for key in fido._registry: + for ext in sd._extensions_to_strip: + if key.endswith(ext): + key = key[: -len(ext)] + keys.append(key) + return keys + + base_path = fido.fido.path + fileext, name, extension = _validate_sampledata_name(name) + + downloader = None + if pbar: + try: + import tqdm # noqa: F401 + + downloader = pch.pooch.HTTPDownloader(progressbar=True) + except ImportError: + mylog.warning("tqdm is not installed, progress bar can not be displayed.") + + if extension == "h5": + processor = pch.pooch.Untar() + else: + # we are going to assume most files that exist on the hub are + # compressed in .tar folders. Some may not. + processor = None + + fname = fido.fido.fetch(name, processor=processor, downloader=downloader) + + # The `folder_path` variable is used here to notify the user where the + # files have been unpacked to. However, we can't assume this is reliable + # because in some cases the common path will overlap with the `load_name` + # variable of the file. + folder_path = os.path.commonprefix(fname) + mylog.info("Files located at %s", folder_path) + + # Location of the file to load automatically, registered in the Fido class + info = fido[fileext] + file_lookup = info["load_name"] + optional_args = info["load_kwargs"] + + if specific_file is None: + # right now work on loading only untarred files. build out h5 later + mylog.info("Default to loading %s for %s dataset", file_lookup, name) + loaded_file = os.path.join(base_path, "%s.untar" % fileext, name, file_lookup) + else: + mylog.info("Loading %s for %s dataset", specific_file, name) + loaded_file = os.path.join(base_path, "%s.untar" % fileext, name, specific_file) + + return load(loaded_file, **optional_args) + + +def _validate_sampledata_name(name): + """ + format name of sample data passed to function, accepts a named string + argument and parses it to determine the sample data name, what type of + extension it has, or other relevant information. + + returns + ------- + fileext : str + The name of the sample data, with the file extension + example: "IsolatedGalaxy.tar.gz" + basename : str + The name of the sample data, without the file extension + example: "IsolatedGalaxy" + extension : str + name of extension of remote sample data + example: "h5" or "tar" + """ + + if not isinstance(name, str): + mylog.error("The argument %s passed to load_sample() is not a string.", name) + + # now get the extension if it exists + base, ext = os.path.splitext(name) + if ext == "": + # Right now we are assuming that any name passed without an explicit + # extension is packed in a tarball. This logic can be modified later to + # be more flexible. + fileext = "%s.tar.gz" % name + basename = name + extension = "tar" + elif ext == ".gz": + fileext = name + basename = os.path.splitext(base)[0] + extension = "tar" + elif ext in [".h5", ".hdf5"]: + fileext = name + basename = base + extension = "h5" + else: + mylog.info( + """extension of %s for dataset %s is unexpected. 
the `load_data` + function may not work as expected""", + ext, + name, + ) + extension = ext + fileext = name + basename = base + return fileext, basename, extension diff --git a/yt/utilities/load_sample.py b/yt/utilities/load_sample.py deleted file mode 100644 index bc310a53475..00000000000 --- a/yt/utilities/load_sample.py +++ /dev/null @@ -1,145 +0,0 @@ -""" -sample data manager for yt - -This utility will check to see if sample data exists on disc. -If not, it will download it. - -""" -import os - -import yt.utilities.sample_data as sd -from yt.funcs import mylog -from yt.utilities.on_demand_imports import _pooch as pch - - -def load_sample(name=None, specific_file=None, pbar=True): - """ - Load sample data with yt. Simple wrapper around yt.load to include fetching - data with pooch. - - Parameters - ---------- - name : str or None - The name of the sample data to load. This is generally the name of the - folder of the dataset. For IsolatedGalaxy, the name would be - `IsolatedGalaxy`. If `None` is supplied, the return value - will be a list of all known datasets (by name). - - specific_file : str, optional - optional argument -- the name of the file to load that is located - within sample dataset of `name`. For the dataset `enzo_cosmology_plus`, - which has a number of timesteps available, one may wish to choose - DD0003. The file specifically would be - `enzo_cosmology_plus/DD0003/DD0003`, and the argument passed to this - variable would be `DD0003/DD0003` - - pbar: bool - display a progress bar - - """ - from yt.loaders import load - - fido = sd.Fido() - if name is None: - keys = [] - for key in fido._registry: - for ext in sd._extensions_to_strip: - if key.endswith(ext): - key = key[: -len(ext)] - keys.append(key) - return keys - - base_path = fido.fido.path - fileext, name, extension = _validate_sampledata_name(name) - - downloader = None - if pbar: - try: - import tqdm # noqa: F401 - - downloader = pch.pooch.HTTPDownloader(progressbar=True) - except ImportError: - mylog.warning("tqdm is not installed, progress bar can not be displayed.") - - if extension == "h5": - processor = pch.pooch.Untar() - else: - # we are going to assume most files that exist on the hub are - # compressed in .tar folders. Some may not. - processor = None - - fname = fido.fido.fetch(name, processor=processor, downloader=downloader) - - # The `folder_path` variable is used here to notify the user where the - # files have been unpacked to. However, we can't assume this is reliable - # because in some cases the common path will overlap with the `load_name` - # variable of the file. - folder_path = os.path.commonprefix(fname) - mylog.info("Files located at %s", folder_path) - - # Location of the file to load automatically, registered in the Fido class - info = fido[fileext] - file_lookup = info["load_name"] - optional_args = info["load_kwargs"] - - if specific_file is None: - # right now work on loading only untarred files. 
build out h5 later - mylog.info("Default to loading %s for %s dataset", file_lookup, name) - loaded_file = os.path.join(base_path, f"{fileext}.untar", name, file_lookup) - else: - mylog.info("Loading %s for %s dataset", specific_file, name) - loaded_file = os.path.join(base_path, f"{fileext}.untar", name, specific_file) - - return load(loaded_file, **optional_args) - - -def _validate_sampledata_name(name): - """ - format name of sample data passed to function, accepts a named string - argument and parses it to determine the sample data name, what type of - extension it has, or other relevant information. - - returns - ------- - fileext : str - The name of the sample data, with the file extension - example: "IsolatedGalaxy.tar.gz" - basename : str - The name of the sample data, without the file extension - example: "IsolatedGalaxy" - extension : str - name of extension of remote sample data - example: "h5" or "tar" - """ - - if not isinstance(name, str): - mylog.error("The argument %s passed to load_sample() is not a string.", name) - - # now get the extension if it exists - base, ext = os.path.splitext(name) - if ext == "": - # Right now we are assuming that any name passed without an explicit - # extension is packed in a tarball. This logic can be modified later to - # be more flexible. - fileext = f"{name}.tar.gz" - basename = name - extension = "tar" - elif ext == ".gz": - fileext = name - basename = os.path.splitext(base)[0] - extension = "tar" - elif ext in [".h5", ".hdf5"]: - fileext = name - basename = base - extension = "h5" - else: - mylog.info( - """extension of %s for dataset %s is unexpected. the `load_data` - function may not work as expected""", - ext, - name, - ) - extension = ext - fileext = name - basename = base - return fileext, basename, extension diff --git a/yt/utilities/tests/test_load_sample.py b/yt/utilities/tests/test_load_sample.py index f3116e6271c..0a2eaf62e6a 100644 --- a/yt/utilities/tests/test_load_sample.py +++ b/yt/utilities/tests/test_load_sample.py @@ -1,5 +1,5 @@ +from yt.loaders import _validate_sampledata_name from yt.testing import assert_equal -from yt.utilities.load_sample import _validate_sampledata_name names = { "t1": { From 17c0bc15b019b4124201172a205db9a3a947d9ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 17:14:12 +0200 Subject: [PATCH 498/653] remove now empty file From fe76895860ea564d97dd3ebb6979f81907b5d4da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 17:25:51 +0200 Subject: [PATCH 499/653] raise YTModuleRemoved in convenience --- yt/convenience.py | 9 ++------- 1 file changed, 2 insertions(+), 7 deletions(-) diff --git a/yt/convenience.py b/yt/convenience.py index c887d3390f9..3bbd821988c 100644 --- a/yt/convenience.py +++ b/yt/convenience.py @@ -1,8 +1,3 @@ -# this is a deprecated module -from .funcs import issue_deprecation_warning -from .loaders import load, load_simulation, simulation +from yt.utilities.exceptions import YTModuleRemoved -issue_deprecation_warning( - "importing from yt.convenience is deprecated in favor of yt.loaders\n" - "This will become an error in yt 4.1" -) \ No newline at end of file +raise YTModuleRemoved("yt.convenience", "yt.loaders") From 3293256d56fb44cb9976c55ef67b51fe86b2dee3 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 10 Aug 2020 17:28:39 +0200 Subject: [PATCH 500/653] fix broken imports --- yt/data_objects/tests/test_covering_grid.py | 3 +-- 
yt/data_objects/tests/test_exclude_functions.py | 2 +- yt/frontends/stream/tests/test_outputs.py | 2 +- yt/loaders.py | 4 ++-- 4 files changed, 5 insertions(+), 6 deletions(-) diff --git a/yt/data_objects/tests/test_covering_grid.py b/yt/data_objects/tests/test_covering_grid.py index 657386a4f38..14a169f7095 100644 --- a/yt/data_objects/tests/test_covering_grid.py +++ b/yt/data_objects/tests/test_covering_grid.py @@ -1,8 +1,7 @@ import numpy as np from yt.fields.derived_field import ValidateParameter -from yt.frontends.stream.loaders import load_particles -from yt.loaders import load +from yt.loaders import load, load_particles from yt.testing import ( assert_almost_equal, assert_array_equal, diff --git a/yt/data_objects/tests/test_exclude_functions.py b/yt/data_objects/tests/test_exclude_functions.py index e2abc982242..42f244bbc2c 100644 --- a/yt/data_objects/tests/test_exclude_functions.py +++ b/yt/data_objects/tests/test_exclude_functions.py @@ -1,6 +1,6 @@ import numpy as np -from yt.frontends.stream.loaders import load_uniform_grid +from yt.loaders import load_uniform_grid from yt.testing import assert_equal, fake_random_ds diff --git a/yt/frontends/stream/tests/test_outputs.py b/yt/frontends/stream/tests/test_outputs.py index 74b8fbfcc02..fba4f323408 100644 --- a/yt/frontends/stream/tests/test_outputs.py +++ b/yt/frontends/stream/tests/test_outputs.py @@ -5,7 +5,7 @@ import numpy as np -from yt.frontends.stream.loaders import load_particles, load_uniform_grid +from yt.loaders import load_particles, load_uniform_grid from yt.testing import assert_equal, assert_raises from yt.utilities.exceptions import ( YTInconsistentGridFieldShape, diff --git a/yt/loaders.py b/yt/loaders.py index 3b054da90f7..b05e4ca79e4 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -21,11 +21,11 @@ ) from yt.utilities.hierarchy_inspection import find_lowest_subclasses from yt.utilities.lib.misc_utilities import get_box_grids_level -from yt.utilities.on_demand_imports import _pooch as pch -from yt.utilities.parameter_file_storage import ( +from yt.utilities.object_registries import ( output_type_registry, simulation_time_series_registry, ) +from yt.utilities.on_demand_imports import _pooch as pch # --- Loaders for known data formats --- From 04a7910a904e5873bab59e2cac9d8032073b24eb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 10:46:22 +0200 Subject: [PATCH 501/653] refactor: rename (tmp) objects from pooch/fido for load_sample --- yt/loaders.py | 17 +++++++++-------- yt/utilities/sample_data.py | 8 ++++---- 2 files changed, 13 insertions(+), 12 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index b05e4ca79e4..76ed2f762e1 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -9,7 +9,6 @@ import numpy as np -import yt.utilities.sample_data as sd from yt.config import ytcfg from yt.funcs import ensure_list, issue_deprecation_warning, mylog from yt.utilities.decompose import decompose_array, get_psize @@ -25,7 +24,8 @@ output_type_registry, simulation_time_series_registry, ) -from yt.utilities.on_demand_imports import _pooch as pch +from yt.utilities.on_demand_imports import _pooch as pooch +from yt.utilities.sample_data import PoochSomething, _extensions_to_strip # --- Loaders for known data formats --- @@ -1311,17 +1311,18 @@ def load_sample(name=None, specific_file=None, pbar=True): """ - fido = sd.Fido() + fido = PoochSomething() + if name is None: keys = [] for key in fido._registry: - for ext in sd._extensions_to_strip: + for ext in _extensions_to_strip: if 
key.endswith(ext): key = key[: -len(ext)] keys.append(key) return keys - base_path = fido.fido.path + base_path = fido.pooch_obj.path fileext, name, extension = _validate_sampledata_name(name) downloader = None @@ -1329,18 +1330,18 @@ def load_sample(name=None, specific_file=None, pbar=True): try: import tqdm # noqa: F401 - downloader = pch.pooch.HTTPDownloader(progressbar=True) + downloader = pooch.HTTPDownloader(progressbar=True) except ImportError: mylog.warning("tqdm is not installed, progress bar can not be displayed.") if extension == "h5": - processor = pch.pooch.Untar() + processor = pooch.Untar() else: # we are going to assume most files that exist on the hub are # compressed in .tar folders. Some may not. processor = None - fname = fido.fido.fetch(name, processor=processor, downloader=downloader) + fname = fido.pooch_obj.fetch(name, processor=processor, downloader=downloader) # The `folder_path` variable is used here to notify the user where the # files have been unpacked to. However, we can't assume this is reliable diff --git a/yt/utilities/sample_data.py b/yt/utilities/sample_data.py index 83fb882d507..a4413f577dc 100644 --- a/yt/utilities/sample_data.py +++ b/yt/utilities/sample_data.py @@ -9,7 +9,7 @@ import pkg_resources from yt.config import ytcfg -from yt.utilities.on_demand_imports import _pooch as pch +from yt.utilities.on_demand_imports import _pooch as pooch ## The format of the data registry json: ## @@ -25,7 +25,7 @@ _extensions_to_strip = (".tgz", ".tar.gz", ".gz") -class Fido: +class PoochSomething: r""" Container for a pooch object used to fetch remote data that isn't already stored locally. @@ -38,8 +38,8 @@ def __init__(self, filename="sample_data_registry.json", cache_dir=None): if os.path.isdir(ytcfg.get("yt", "test_data_dir")): cache_dir = ytcfg.get("yt", "test_data_dir") else: - cache_dir = pch.pooch.os_cache("yt") - self.fido = pch.pooch.create( + cache_dir = pooch.pooch.os_cache("yt") + self.pooch_obj = pooch.pooch.create( path=cache_dir, registry={_: self._registry[_]["hash"] for _ in self._registry}, urls={_: self._registry[_]["url"] for _ in self._registry}, From d2baad62803c34ffd3387cc3e88f59e4dfb076c2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 13:16:52 +0200 Subject: [PATCH 502/653] refactor: fix an error where the filename was wrongly passed to Pooch object --- yt/loaders.py | 29 +++++++++++++++++++---------- 1 file changed, 19 insertions(+), 10 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index 76ed2f762e1..dc4ec839682 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1285,14 +1285,14 @@ def flatten(l): # If not, it will download it. -def load_sample(name=None, specific_file=None, pbar=True): +def load_sample(fn=None, specific_file=None, pbar=True): """ Load sample data with yt. Simple wrapper around yt.load to include fetching data with pooch. Parameters ---------- - name : str or None + fn : str or None The name of the sample data to load. This is generally the name of the folder of the dataset. For IsolatedGalaxy, the name would be `IsolatedGalaxy`. 
If `None` is supplied, the return value @@ -1313,7 +1313,7 @@ def load_sample(name=None, specific_file=None, pbar=True): fido = PoochSomething() - if name is None: + if fn is None: keys = [] for key in fido._registry: for ext in _extensions_to_strip: @@ -1323,7 +1323,10 @@ def load_sample(name=None, specific_file=None, pbar=True): return keys base_path = fido.pooch_obj.path - fileext, name, extension = _validate_sampledata_name(name) + + registered_fname, name, extension = _validate_sample_fname( + fn + ) # todo: make this part of the class downloader = None if pbar: @@ -1341,32 +1344,38 @@ def load_sample(name=None, specific_file=None, pbar=True): # compressed in .tar folders. Some may not. processor = None - fname = fido.pooch_obj.fetch(name, processor=processor, downloader=downloader) + storage_fname = fido.pooch_obj.fetch( + registered_fname, processor=processor, downloader=downloader + ) # The `folder_path` variable is used here to notify the user where the # files have been unpacked to. However, we can't assume this is reliable # because in some cases the common path will overlap with the `load_name` # variable of the file. - folder_path = os.path.commonprefix(fname) + folder_path = os.path.commonprefix(storage_fname) mylog.info("Files located at %s", folder_path) # Location of the file to load automatically, registered in the Fido class - info = fido[fileext] + info = fido[registered_fname] file_lookup = info["load_name"] optional_args = info["load_kwargs"] if specific_file is None: # right now work on loading only untarred files. build out h5 later mylog.info("Default to loading %s for %s dataset", file_lookup, name) - loaded_file = os.path.join(base_path, "%s.untar" % fileext, name, file_lookup) + loaded_file = os.path.join( + base_path, "%s.untar" % registered_fname, name, file_lookup + ) else: mylog.info("Loading %s for %s dataset", specific_file, name) - loaded_file = os.path.join(base_path, "%s.untar" % fileext, name, specific_file) + loaded_file = os.path.join( + base_path, "%s.untar" % registered_fname, name, specific_file + ) return load(loaded_file, **optional_args) -def _validate_sampledata_name(name): +def _validate_sample_fname(name): """ format name of sample data passed to function, accepts a named string argument and parses it to determine the sample data name, what type of From 18ff712142d34167ef38d0b9c98f9de110911388 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 13:20:29 +0200 Subject: [PATCH 503/653] make validator function part of PoochSomething (tmpname) class --- yt/loaders.py | 54 +------------------------------------ yt/utilities/sample_data.py | 54 +++++++++++++++++++++++++++++++++++++ 2 files changed, 55 insertions(+), 53 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index dc4ec839682..88adb69e1cb 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1324,7 +1324,7 @@ def load_sample(fn=None, specific_file=None, pbar=True): base_path = fido.pooch_obj.path - registered_fname, name, extension = _validate_sample_fname( + registered_fname, name, extension = fido._validate_sample_fname( fn ) # todo: make this part of the class @@ -1373,55 +1373,3 @@ def load_sample(fn=None, specific_file=None, pbar=True): ) return load(loaded_file, **optional_args) - - -def _validate_sample_fname(name): - """ - format name of sample data passed to function, accepts a named string - argument and parses it to determine the sample data name, what type of - extension it has, or other relevant information. 
- - returns - ------- - fileext : str - The name of the sample data, with the file extension - example: "IsolatedGalaxy.tar.gz" - basename : str - The name of the sample data, without the file extension - example: "IsolatedGalaxy" - extension : str - name of extension of remote sample data - example: "h5" or "tar" - """ - - if not isinstance(name, str): - mylog.error("The argument %s passed to load_sample() is not a string.", name) - - # now get the extension if it exists - base, ext = os.path.splitext(name) - if ext == "": - # Right now we are assuming that any name passed without an explicit - # extension is packed in a tarball. This logic can be modified later to - # be more flexible. - fileext = "%s.tar.gz" % name - basename = name - extension = "tar" - elif ext == ".gz": - fileext = name - basename = os.path.splitext(base)[0] - extension = "tar" - elif ext in [".h5", ".hdf5"]: - fileext = name - basename = base - extension = "h5" - else: - mylog.info( - """extension of %s for dataset %s is unexpected. the `load_data` - function may not work as expected""", - ext, - name, - ) - extension = ext - fileext = name - basename = base - return fileext, basename, extension diff --git a/yt/utilities/sample_data.py b/yt/utilities/sample_data.py index a4413f577dc..2f1fe65174b 100644 --- a/yt/utilities/sample_data.py +++ b/yt/utilities/sample_data.py @@ -9,6 +9,7 @@ import pkg_resources from yt.config import ytcfg +from yt.funcs import mylog from yt.utilities.on_demand_imports import _pooch as pooch ## The format of the data registry json: @@ -56,3 +57,56 @@ def __getitem__(self, item): if item + ext in self._registry: return self._registry[item + ext] raise KeyError(item) + + def _validate_sample_fname(self, name): + """ + format name of sample data passed to function, accepts a named string + argument and parses it to determine the sample data name, what type of + extension it has, or other relevant information. + + Returns + ------- + fileext : str + The name of the sample data, with the file extension + example: "IsolatedGalaxy.tar.gz" + basename : str + The name of the sample data, without the file extension + example: "IsolatedGalaxy" + extension : str + name of extension of remote sample data + example: "h5" or "tar" + """ + + if not isinstance(name, str): + mylog.error( + "The argument %s passed to load_sample() is not a string.", name + ) + + # now get the extension if it exists + base, ext = os.path.splitext(name) + if ext == "": + # Right now we are assuming that any name passed without an explicit + # extension is packed in a tarball. This logic can be modified later to + # be more flexible. + fileext = "%s.tar.gz" % name + basename = name + extension = "tar" + elif ext == ".gz": + fileext = name + basename = os.path.splitext(base)[0] + extension = "tar" + elif ext in [".h5", ".hdf5"]: + fileext = name + basename = base + extension = "h5" + else: + mylog.info( + """extension of %s for dataset %s is unexpected. 
the `load_data` + function may not work as expected""", + ext, + name, + ) + extension = ext + fileext = name + basename = base + return fileext, basename, extension From 27bb52e8ec8d136f96b9dfd00617360270b9879b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 13:22:11 +0200 Subject: [PATCH 504/653] rename class --- yt/loaders.py | 4 ++-- yt/utilities/sample_data.py | 2 +- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index 88adb69e1cb..5974bdefcca 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -25,7 +25,7 @@ simulation_time_series_registry, ) from yt.utilities.on_demand_imports import _pooch as pooch -from yt.utilities.sample_data import PoochSomething, _extensions_to_strip +from yt.utilities.sample_data import PoochHandle, _extensions_to_strip # --- Loaders for known data formats --- @@ -1311,7 +1311,7 @@ def load_sample(fn=None, specific_file=None, pbar=True): """ - fido = PoochSomething() + fido = PoochHandle() if fn is None: keys = [] diff --git a/yt/utilities/sample_data.py b/yt/utilities/sample_data.py index 2f1fe65174b..a88919b4597 100644 --- a/yt/utilities/sample_data.py +++ b/yt/utilities/sample_data.py @@ -26,7 +26,7 @@ _extensions_to_strip = (".tgz", ".tar.gz", ".gz") -class PoochSomething: +class PoochHandle: r""" Container for a pooch object used to fetch remote data that isn't already stored locally. From 34f53a96e68f7657409f31e9589702a16c686e33 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 13:28:57 +0200 Subject: [PATCH 505/653] update documentation references --- doc/source/reference/api/api.rst | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/doc/source/reference/api/api.rst b/doc/source/reference/api/api.rst index c4f93f174f1..25f56bdfd7b 100644 --- a/doc/source/reference/api/api.rst +++ b/doc/source/reference/api/api.rst @@ -446,14 +446,15 @@ Loading Data .. 
autosummary:: - ~yt.convenience.load - ~yt.convenience.simulation - ~yt.frontends.stream.data_structures.load_uniform_grid - ~yt.frontends.stream.data_structures.load_amr_grids - ~yt.frontends.stream.data_structures.load_particles - ~yt.frontends.stream.data_structures.load_octree - ~yt.frontends.stream.data_structures.load_hexahedral_mesh - ~yt.frontends.stream.data_structures.load_unstructured_mesh + ~yt.loaders.load + ~yt.loaders.simulation + ~yt.loaders.load_uniform_grid + ~yt.loaders.load_amr_grids + ~yt.loaders.load_particles + ~yt.loaders.load_octree + ~yt.loaders.load_hexahedral_mesh + ~yt.loaders.load_unstructured_mesh + ~yt.loaders.load_sample Derived Datatypes ----------------- From 7d4edf75c304e8169aa8107c4ab0ff019bb12aeb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 14:19:19 +0200 Subject: [PATCH 506/653] make validator a static method for testing --- yt/utilities/sample_data.py | 3 ++- yt/utilities/tests/test_load_sample.py | 6 ++++-- 2 files changed, 6 insertions(+), 3 deletions(-) diff --git a/yt/utilities/sample_data.py b/yt/utilities/sample_data.py index a88919b4597..ae2653576fb 100644 --- a/yt/utilities/sample_data.py +++ b/yt/utilities/sample_data.py @@ -58,7 +58,8 @@ def __getitem__(self, item): return self._registry[item + ext] raise KeyError(item) - def _validate_sample_fname(self, name): + @staticmethod + def _validate_sample_fname(name): """ format name of sample data passed to function, accepts a named string argument and parses it to determine the sample data name, what type of diff --git a/yt/utilities/tests/test_load_sample.py b/yt/utilities/tests/test_load_sample.py index 0a2eaf62e6a..4966e2752d1 100644 --- a/yt/utilities/tests/test_load_sample.py +++ b/yt/utilities/tests/test_load_sample.py @@ -1,5 +1,5 @@ -from yt.loaders import _validate_sampledata_name from yt.testing import assert_equal +from yt.utilities.sample_data import PoochHandle names = { "t1": { @@ -47,7 +47,9 @@ def test_name_validator(): for test in names: - fileext, bname, ext = _validate_sampledata_name(names[test]["load_name"]) + fileext, bname, ext = PoochHandle._validate_sampledata_name( + names[test]["load_name"] + ) expected_answers = names[test]["answers"] assert_equal(fileext, expected_answers["fileext"]) assert_equal(bname, expected_answers["basename"]) From 9d965bccffa6ebcf638ed8bbdf6cdee9eeae7864 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 14:20:33 +0200 Subject: [PATCH 507/653] rename test file --- yt/utilities/tests/{test_load_sample.py => test_poochhandle.py} | 0 1 file changed, 0 insertions(+), 0 deletions(-) rename yt/utilities/tests/{test_load_sample.py => test_poochhandle.py} (100%) diff --git a/yt/utilities/tests/test_load_sample.py b/yt/utilities/tests/test_poochhandle.py similarity index 100% rename from yt/utilities/tests/test_load_sample.py rename to yt/utilities/tests/test_poochhandle.py From b188c26f8fc9237773dd63f74d77c716fa1b9d1b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 11 Aug 2020 19:08:52 +0200 Subject: [PATCH 508/653] fix a typo in test --- yt/utilities/tests/test_poochhandle.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/tests/test_poochhandle.py b/yt/utilities/tests/test_poochhandle.py index 4966e2752d1..f3ea1f4b210 100644 --- a/yt/utilities/tests/test_poochhandle.py +++ b/yt/utilities/tests/test_poochhandle.py @@ -47,7 +47,7 @@ def test_name_validator(): for test in names: - fileext, bname, ext = 
PoochHandle._validate_sampledata_name( + fileext, bname, ext = PoochHandle._validate_sample_fname( names[test]["load_name"] ) expected_answers = names[test]["answers"] From 7e6e47875cc810593308859c0a2937a3b581b06d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 08:04:12 +0200 Subject: [PATCH 509/653] flynting --- yt/frontends/stream/definitions.py | 2 +- yt/loaders.py | 4 ++-- yt/utilities/sample_data.py | 2 +- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/frontends/stream/definitions.py b/yt/frontends/stream/definitions.py index 659f0a35b55..671bd57ca9b 100644 --- a/yt/frontends/stream/definitions.py +++ b/yt/frontends/stream/definitions.py @@ -69,7 +69,7 @@ def assign_particle_data(ds, pdata, bbox): for ptype in ds.particle_types_raw: if (ptype, "particle_position_x") in pdata: - x, y, z = (pdata[ptype, "particle_position_%s" % ax] for ax in "xyz") + x, y, z = (pdata[ptype, f"particle_position_{ax}"] for ax in "xyz") elif (ptype, "particle_position") in pdata: x, y, z = pdata[ptype, "particle_position"].T else: diff --git a/yt/loaders.py b/yt/loaders.py index 5974bdefcca..2ab8c83bb37 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1364,12 +1364,12 @@ def load_sample(fn=None, specific_file=None, pbar=True): # right now work on loading only untarred files. build out h5 later mylog.info("Default to loading %s for %s dataset", file_lookup, name) loaded_file = os.path.join( - base_path, "%s.untar" % registered_fname, name, file_lookup + base_path, f"{registered_fname}.untar", name, file_lookup ) else: mylog.info("Loading %s for %s dataset", specific_file, name) loaded_file = os.path.join( - base_path, "%s.untar" % registered_fname, name, specific_file + base_path, f"{registered_fname}.untar", name, specific_file ) return load(loaded_file, **optional_args) diff --git a/yt/utilities/sample_data.py b/yt/utilities/sample_data.py index ae2653576fb..8ce9789fd9e 100644 --- a/yt/utilities/sample_data.py +++ b/yt/utilities/sample_data.py @@ -89,7 +89,7 @@ def _validate_sample_fname(name): # Right now we are assuming that any name passed without an explicit # extension is packed in a tarball. This logic can be modified later to # be more flexible. 
- fileext = "%s.tar.gz" % name + fileext = f"{name}.tar.gz" basename = name extension = "tar" elif ext == ".gz": From 3b4a436f6717cce1a3419f24a960b93d3e3284a1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 12:41:25 +0200 Subject: [PATCH 510/653] add a style check GH workflow --- .github/workflows/style-checks.yaml | 78 +++++++++++++++++++++++++++++ 1 file changed, 78 insertions(+) create mode 100644 .github/workflows/style-checks.yaml diff --git a/.github/workflows/style-checks.yaml b/.github/workflows/style-checks.yaml new file mode 100644 index 00000000000..aa794adfa5e --- /dev/null +++ b/.github/workflows/style-checks.yaml @@ -0,0 +1,78 @@ +name: Style Checks +on: [push] + +jobs: + flake8: + name: flake8 + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Setup Python + uses: actions/setup-python@master + with: + python-version: 3.8 + - name: install + id: install + run : pip install -r tests/lint_requirements.txt + + - name: check + id: flake8 + run: | + flake8 --version + flake8 yt/ + + black: + name: black + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Setup Python + uses: actions/setup-python@master + with: + python-version: 3.8 + - name: install + id: install + run : pip install -r tests/lint_requirements.txt + + - name: check + id: black + run: | + black --version + black --check --diff yt/ + + isort: + name: isort + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Setup Python + uses: actions/setup-python@master + with: + python-version: 3.8 + - name: install + id: install + run : pip install -r tests/lint_requirements.txt + + - name: check + id: isort + run: | + isort --version-number + isort . --check --diff + + flynt: + name: flynt + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: Setup Python + uses: actions/setup-python@master + with: + python-version: 3.8 + - name: install + id: install + run : pip install -r tests/lint_requirements.txt + - name: check + id: flynt + run: | + flynt --version + flynt yt --fail-on-change --dry-run --exclude yt/extern From c12b8b466d135552acb87d6df21398df4c72446c Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 14 Aug 2020 09:12:40 -0500 Subject: [PATCH 511/653] Flyntize --- yt/data_objects/selection_objects/base_objects.py | 8 ++++---- yt/data_objects/selection_objects/boolean_operations.py | 2 +- yt/data_objects/selection_objects/ray.py | 4 ++-- yt/data_objects/selection_objects/spheroids.py | 2 +- 4 files changed, 8 insertions(+), 8 deletions(-) diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/base_objects.py index 0fcd31f7092..d53c07a3ed4 100644 --- a/yt/data_objects/selection_objects/base_objects.py +++ b/yt/data_objects/selection_objects/base_objects.py @@ -67,7 +67,7 @@ def selector(self): if self._selector is not None: return self._selector s_module = getattr(self, "_selector_module", yt.geometry.selection_routines) - sclass = getattr(s_module, "%s_selector" % self._type_name, None) + sclass = getattr(s_module, f"{self._type_name}_selector", None) if sclass is None: raise YTDataSelectorNotImplemented(self._type_name) @@ -1235,7 +1235,7 @@ def extract_isocontours( for v1 in verts: f.write("v %0.16e %0.16e %0.16e\n" % (v1[0], v1[1], v1[2])) for i in range(len(verts) // 3): - f.write("f %s %s %s\n" % (i * 3 + 1, i * 3 + 2, i * 3 + 3)) + f.write(f"f {i * 3 + 1} {i * 3 + 2} {i * 3 + 3}\n") if not hasattr(filename, "write"): f.close() if 
sample_values is not None: @@ -1395,8 +1395,8 @@ def extract_connected_sets( if cid == -1: continue contours[level][cid] = base_object.cut_region( - ["obj['contours_%s'] == %s" % (contour_key, cid)], - {"contour_slices_%s" % contour_key: cids}, + [f"obj['contours_{contour_key}'] == {cid}"], + {f"contour_slices_{contour_key}": cids}, ) return cons, contours diff --git a/yt/data_objects/selection_objects/boolean_operations.py b/yt/data_objects/selection_objects/boolean_operations.py index 65fd120ffbc..8dae00feecb 100644 --- a/yt/data_objects/selection_objects/boolean_operations.py +++ b/yt/data_objects/selection_objects/boolean_operations.py @@ -50,7 +50,7 @@ def __init__( self.op = op.upper() self.dobj1 = dobj1 self.dobj2 = dobj2 - name = "Boolean%sSelector" % (self.op,) + name = f"Boolean{self.op}Selector" sel_cls = getattr(yt.geometry.selection_routines, name) self._selector = sel_cls(self) diff --git a/yt/data_objects/selection_objects/ray.py b/yt/data_objects/selection_objects/ray.py index d799081ae56..21cb9e360e8 100644 --- a/yt/data_objects/selection_objects/ray.py +++ b/yt/data_objects/selection_objects/ray.py @@ -87,8 +87,8 @@ def __init__(self, axis, coords, ds=None, field_parameters=None, data_source=Non self.px_ax = xax self.py_ax = yax # Even though we may not be using x,y,z we use them here. - self.px_dx = "d%s" % ("xyz"[self.px_ax]) - self.py_dx = "d%s" % ("xyz"[self.py_ax]) + self.px_dx = f"d{'xyz'[self.px_ax]}" + self.py_dx = f"d{'xyz'[self.py_ax]}" # Convert coordinates to code length. if isinstance(coords[0], YTQuantity): self.px = self.ds.quan(coords[0]).to("code_length") diff --git a/yt/data_objects/selection_objects/spheroids.py b/yt/data_objects/selection_objects/spheroids.py index 8b1a0ee6086..014ec012d24 100644 --- a/yt/data_objects/selection_objects/spheroids.py +++ b/yt/data_objects/selection_objects/spheroids.py @@ -105,7 +105,7 @@ def __init__(self, points, ds=None, field_parameters=None, data_source=None): points = fix_length(points, ds) if len(points) < 2: raise YTException( - "Not enough points. Expected at least 2, got %s" % len(points) + f"Not enough points. 
Expected at least 2, got {len(points)}" ) mylog.debug("Building minimal sphere around points.") mb = _miniball.Miniball(points) From d934fbba034467132d7dea3c1bad7676c0299b73 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 16:28:56 +0200 Subject: [PATCH 512/653] correct import Co-authored-by: Britton Smith --- yt/frontends/halo_catalog/tests/test_outputs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index b2e033a6880..c4b1416a712 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -1,6 +1,6 @@ import numpy as np -from yt.frontends.halo_catalog.data_structures import HaloCatalogDataset +from yt.frontends.halo_catalog.data_structures import YTHaloCatalogDataset from yt.frontends.ytdata.utilities import save_as_dataset from yt.loaders import load as yt_load from yt.testing import TempDirTest, assert_array_equal, requires_module From 45a2d5254ab3849a0124e5a740ceeebc9354db77 Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Fri, 14 Aug 2020 15:32:45 +0100 Subject: [PATCH 513/653] Update yt/frontends/halo_catalog/tests/test_outputs.py --- yt/frontends/halo_catalog/tests/test_outputs.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/yt/frontends/halo_catalog/tests/test_outputs.py b/yt/frontends/halo_catalog/tests/test_outputs.py index c4b1416a712..996c5b0b29f 100644 --- a/yt/frontends/halo_catalog/tests/test_outputs.py +++ b/yt/frontends/halo_catalog/tests/test_outputs.py @@ -3,7 +3,13 @@ from yt.frontends.halo_catalog.data_structures import YTHaloCatalogDataset from yt.frontends.ytdata.utilities import save_as_dataset from yt.loaders import load as yt_load -from yt.testing import TempDirTest, assert_array_equal, requires_module +from yt.testing import ( + TempDirTest, + assert_array_equal, + assert_equal, + requires_file, + requires_module, +) from yt.units.yt_array import YTArray, YTQuantity from yt.utilities.answer_testing.framework import data_dir_load From 2a111733b49dd0ddb31655012c1d2ef6ea119c72 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Tue, 30 Jun 2020 13:05:32 -0400 Subject: [PATCH 514/653] Fix periodicity for on-axis SPH projections --- .../coordinates/cartesian_coordinates.py | 6 +- yt/utilities/lib/pixelization_routines.pyx | 136 ++++++++++++------ 2 files changed, 92 insertions(+), 50 deletions(-) diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 32b8f0d71a4..facec1743ba 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -365,7 +365,7 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, + bnds, check_period=int(periodic), period=period ) # We use code length here, but to get the path length right # we need to multiply by the conversion factor between @@ -392,7 +392,7 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, + bnds, check_period=int(periodic), period=period, weight_field=chunk[weight].in_units(wounits), ) mylog.info( @@ -411,7 +411,7 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[weight].in_units(wounits), - bnds, + bnds, 
check_period=int(periodic), period=period, ) normalization_2d_utility(buff, weight_buff) elif isinstance(data_source, YTSlice): diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index a5e5dd31700..31af993fc1e 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -987,19 +987,32 @@ def pixelize_sph_kernel_projection( np.float64_t[:] quantity_to_smooth, bounds, kernel_name="cubic", - weight_field=None): + weight_field=None, + int check_period = 1, + period=None): cdef np.intp_t xsize, ysize cdef np.float64_t x_min, x_max, y_min, y_max, prefactor_j cdef np.int64_t xi, yi, x0, x1, y0, y1 cdef np.float64_t q_ij2, posx_diff, posy_diff, ih_j2 - cdef np.float64_t x, y, dx, dy, idx, idy, h_j2 + cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, px, py + cdef np.float64_t period_x, period_y cdef int index, i, j cdef np.float64_t[:] _weight_field + cdef int xiter[2] + cdef int yiter[2] + cdef np.float64_t xiterv[2] + cdef np.float64_t yiterv[2] if weight_field is not None: _weight_field = weight_field + xiter[0] = yiter[0] = 0 + xiterv[0] = yiterv[0] = 0.0 + if period is not None: + period_x = period[0] + period_y = period[1] + # we find the x and y range over which we have pixels and we find how many # pixels we have in each dimension xsize, ysize = buff.shape[0], buff.shape[1] @@ -1024,52 +1037,80 @@ def pixelize_sph_kernel_projection( if j % 100000 == 0: with gil: PyErr_CheckSignals() + + xiter[1] = yiter[1] = 999 + + px = posx[j] + py = posy[j] - # here we find the pixels which this particle contributes to - x0 = ( (posx[j] - hsml[j] - x_min) * idx) - x1 = ( (posx[j] + hsml[j] - x_min) * idx) - x0 = iclip(x0-1, 0, xsize) - x1 = iclip(x1+1, 0, xsize) - - y0 = ( (posy[j] - hsml[j] - y_min) * idy) - y1 = ( (posy[j] + hsml[j] - y_min) * idy) - y0 = iclip(y0-1, 0, ysize) - y1 = iclip(y1+1, 0, ysize) - - # we set the smoothing length squared with lower limit of the pixel - h_j2 = fmax(hsml[j]*hsml[j], dx*dy) - ih_j2 = 1.0/h_j2 - - prefactor_j = pmass[j] / pdens[j] / hsml[j]**2 - if weight_field is None: - prefactor_j *= quantity_to_smooth[j] - else: - prefactor_j *= quantity_to_smooth[j] * _weight_field[j] - - # found pixels we deposit on, loop through those pixels - for xi in range(x0, x1): - # we use the centre of the pixel to calculate contribution - x = (xi + 0.5) * dx + x_min - - posx_diff = posx[j] - x - posx_diff = posx_diff * posx_diff - - if posx_diff > h_j2: continue - - for yi in range(y0, y1): - y = (yi + 0.5) * dy + y_min - - posy_diff = posy[j] - y - posy_diff = posy_diff * posy_diff - if posy_diff > h_j2: continue - - q_ij2 = (posx_diff + posy_diff) * ih_j2 - if q_ij2 >= 1: - continue + if check_period == 1: + if px - hsml[j] < x_min: + xiter[1] = +1 + xiterv[1] = period_x + elif px + hsml[j] > x_max: + xiter[1] = -1 + xiterv[1] = -period_x + if py - hsml[j] < y_min: + yiter[1] = +1 + yiterv[1] = period_y + elif py + hsml[j] > y_max: + yiter[1] = -1 + yiterv[1] = -period_y - # see equation 32 of the SPLASH paper - # now we just use the kernel projection - buff[xi, yi] += prefactor_j * itab.interpolate(q_ij2) + for xi in range(2): + if xiter[xi] == 999: continue + px = posx[j] + xiterv[xi] + if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue + for yi in range(2): + if yiter[yi] == 999: continue + py = posy[j] + yiterv[yi] + if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue + + # here we find the pixels which this particle contributes to + x0 = ((px - hsml[j] - 
x_min)*idx) + x1 = ((px + hsml[j] - x_min)*idx) + x0 = iclip(x0-1, 0, xsize) + x1 = iclip(x1+1, 0, xsize) + + y0 = ((py - hsml[j] - y_min)*idy) + y1 = ((py + hsml[j] - y_min)*idy) + y0 = iclip(y0-1, 0, ysize) + y1 = iclip(y1+1, 0, ysize) + + # we set the smoothing length squared with lower limit of the pixel + h_j2 = fmax(hsml[j]*hsml[j], dx*dy) + ih_j2 = 1.0/h_j2 + + prefactor_j = pmass[j] / pdens[j] / hsml[j]**2 + if weight_field is None: + prefactor_j *= quantity_to_smooth[j] + else: + prefactor_j *= quantity_to_smooth[j] * _weight_field[j] + + # found pixels we deposit on, loop through those pixels + for xi in range(x0, x1): + # we use the centre of the pixel to calculate contribution + x = (xi + 0.5) * dx + x_min + + posx_diff = px - x + posx_diff = posx_diff * posx_diff + + if posx_diff > h_j2: continue + + for yi in range(y0, y1): + y = (yi + 0.5) * dy + y_min + + posy_diff = py - y + posy_diff = posy_diff * posy_diff + if posy_diff > h_j2: continue + + q_ij2 = (posx_diff + posy_diff) * ih_j2 + if q_ij2 >= 1: + continue + + # see equation 32 of the SPLASH paper + # now we just use the kernel projection + buff[xi, yi] += prefactor_j * itab.interpolate(q_ij2) @cython.boundscheck(False) @cython.wraparound(False) @@ -1591,7 +1632,8 @@ def off_axis_projection_SPH(np.float64_t[:] px, quantity_to_smooth, [rot_bounds_x0, rot_bounds_x1, rot_bounds_y0, rot_bounds_y1], - weight_field=weight_field) + weight_field=weight_field, + check_period=0) @cython.boundscheck(False) From c27ea19b5aa752d52115182f0d8ffe06a5ffe6ac Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Tue, 30 Jun 2020 14:46:36 -0400 Subject: [PATCH 515/653] Simpler --- yt/utilities/lib/pixelization_routines.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index 31af993fc1e..f5fdcecc252 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -1059,11 +1059,11 @@ def pixelize_sph_kernel_projection( for xi in range(2): if xiter[xi] == 999: continue - px = posx[j] + xiterv[xi] + px += xiterv[xi] if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue for yi in range(2): if yiter[yi] == 999: continue - py = posy[j] + yiterv[yi] + py += yiterv[yi] if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue # here we find the pixels which this particle contributes to From cbd0c01d670b1326ac2d2c6e76335c10dc8967a6 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Mon, 6 Jul 2020 17:28:50 -0400 Subject: [PATCH 516/653] Do this for slices and arbitrary grids also --- .../construction_data_containers.py | 10 + .../coordinates/cartesian_coordinates.py | 4 +- yt/utilities/lib/pixelization_routines.pyx | 287 ++++++++++++------ 3 files changed, 204 insertions(+), 97 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 87996ef9d69..88dc1eb237a 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -871,6 +871,12 @@ def _fill_sph_particles(self, fields): bounds, size = self._get_grid_bounds_size() + period = self.ds.coordinates.period.copy() + if hasattr(period, 'in_units'): + period = period.in_units("code_length").d + # TODO maybe there is a better way of handling this + periodic = any(self.ds.periodicity) + if smoothing_style == "scatter": for field in fields: fi = self.ds._get_field_info(field) @@ -902,6 +908,8 @@ def 
_fill_sph_particles(self, fields): field_quantity, bounds, pbar=pbar, + check_period=int(periodic), + period=period ) if normalize: pixelize_sph_kernel_arbitrary_grid( @@ -915,6 +923,8 @@ def _fill_sph_particles(self, fields): np.ones(dens.shape[0]), bounds, pbar=pbar, + check_period=int(periodic), + period=period ) if normalize: diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index facec1743ba..2d8329dd14b 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -432,7 +432,7 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, + bnds, check_period=int(periodic), period=period ) if normalize: pixelize_sph_kernel_slice( @@ -443,7 +443,7 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), np.ones(chunk[ptype, "density"].shape[0]), - bnds, + bnds, check_period=int(periodic), period=period ) if normalize: diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index f5fdcecc252..86ef8e27437 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -988,7 +988,7 @@ def pixelize_sph_kernel_projection( bounds, kernel_name="cubic", weight_field=None, - int check_period = 1, + int check_period=1, period=None): cdef np.intp_t xsize, ysize @@ -1286,15 +1286,28 @@ def pixelize_sph_kernel_slice( np.float64_t[:] hsml, np.float64_t[:] pmass, np.float64_t[:] pdens, np.float64_t[:] quantity_to_smooth, - bounds, kernel_name="cubic"): + bounds, kernel_name="cubic", + int check_period=1, + period=None): # similar method to pixelize_sph_kernel_projection cdef np.intp_t xsize, ysize cdef np.float64_t x_min, x_max, y_min, y_max, prefactor_j cdef np.int64_t xi, yi, x0, x1, y0, y1 cdef np.float64_t q_ij, posx_diff, posy_diff, ih_j - cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, h_j + cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, h_j, px, py cdef int index, i, j + cdef np.float64_t period_x, period_y + cdef int xiter[2] + cdef int yiter[2] + cdef np.float64_t xiterv[2] + cdef np.float64_t yiterv[2] + + xiter[0] = yiter[0] = 0 + xiterv[0] = yiterv[0] = 0.0 + if period is not None: + period_x = period[0] + period_y = period[1] xsize, ysize = buff.shape[0], buff.shape[1] @@ -1316,48 +1329,76 @@ def pixelize_sph_kernel_slice( with gil: PyErr_CheckSignals() - x0 = ( (posx[j] - hsml[j] - x_min) * idx) - x1 = ( (posx[j] + hsml[j] - x_min) * idx) - x0 = iclip(x0-1, 0, xsize) - x1 = iclip(x1+1, 0, xsize) - - y0 = ( (posy[j] - hsml[j] - y_min) * idy) - y1 = ( (posy[j] + hsml[j] - y_min) * idy) - y0 = iclip(y0-1, 0, ysize) - y1 = iclip(y1+1, 0, ysize) - - h_j2 = fmax(hsml[j]*hsml[j], dx*dy) - h_j = math.sqrt(h_j2) - ih_j = 1.0/h_j - - prefactor_j = pmass[j] / pdens[j] / hsml[j]**3 - prefactor_j *= quantity_to_smooth[j] - - # Now we know which pixels to deposit onto for this particle, - # so loop over them and add this particle's contribution - for xi in range(x0, x1): - x = (xi + 0.5) * dx + x_min - - posx_diff = posx[j] - x - posx_diff = posx_diff * posx_diff - if posx_diff > h_j2: - continue + xiter[1] = yiter[1] = 999 - for yi in range(y0, y1): - y = (yi + 0.5) * dy + y_min + px = posx[j] + py = posy[j] - posy_diff = posy[j] - y - posy_diff = posy_diff * posy_diff - if posy_diff > h_j2: - continue + if check_period == 1: + if px - hsml[j] < x_min: + 
xiter[1] = +1 + xiterv[1] = period_x + elif px + hsml[j] > x_max: + xiter[1] = -1 + xiterv[1] = -period_x + if py - hsml[j] < y_min: + yiter[1] = +1 + yiterv[1] = period_y + elif py + hsml[j] > y_max: + yiter[1] = -1 + yiterv[1] = -period_y - # see equation 4 of the SPLASH paper - q_ij = math.sqrt(posx_diff + posy_diff) * ih_j - if q_ij >= 1: - continue + for xi in range(2): + if xiter[xi] == 999: continue + px += xiterv[xi] + if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue + for yi in range(2): + if yiter[yi] == 999: continue + py += yiterv[yi] + if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue - # see equations 6, 9, and 11 of the SPLASH paper - buff[xi, yi] += prefactor_j * kernel_func(q_ij) + x0 = ( (px - hsml[j] - x_min) * idx) + x1 = ( (px + hsml[j] - x_min) * idx) + x0 = iclip(x0-1, 0, xsize) + x1 = iclip(x1+1, 0, xsize) + + y0 = ( (py - hsml[j] - y_min) * idy) + y1 = ( (py + hsml[j] - y_min) * idy) + y0 = iclip(y0-1, 0, ysize) + y1 = iclip(y1+1, 0, ysize) + + h_j2 = fmax(hsml[j]*hsml[j], dx*dy) + h_j = math.sqrt(h_j2) + ih_j = 1.0/h_j + + prefactor_j = pmass[j] / pdens[j] / hsml[j]**3 + prefactor_j *= quantity_to_smooth[j] + + # Now we know which pixels to deposit onto for this particle, + # so loop over them and add this particle's contribution + for xi in range(x0, x1): + x = (xi + 0.5) * dx + x_min + + posx_diff = px - x + posx_diff = posx_diff * posx_diff + if posx_diff > h_j2: + continue + + for yi in range(y0, y1): + y = (yi + 0.5) * dy + y_min + + posy_diff = py - y + posy_diff = posy_diff * posy_diff + if posy_diff > h_j2: + continue + + # see equation 4 of the SPLASH paper + q_ij = math.sqrt(posx_diff + posy_diff) * ih_j + if q_ij >= 1: + continue + + # see equations 6, 9, and 11 of the SPLASH paper + buff[xi, yi] += prefactor_j * kernel_func(q_ij) @cython.initializedcheck(False) @cython.boundscheck(False) @@ -1368,14 +1409,31 @@ def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff, np.float64_t[:] hsml, np.float64_t[:] pmass, np.float64_t[:] pdens, np.float64_t[:] quantity_to_smooth, - bounds, pbar=None, kernel_name="cubic"): + bounds, pbar=None, kernel_name="cubic", + int check_period=1, period=None): cdef np.intp_t xsize, ysize, zsize cdef np.float64_t x_min, x_max, y_min, y_max, z_min, z_max, prefactor_j cdef np.int64_t xi, yi, zi, x0, x1, y0, y1, z0, z1 - cdef np.float64_t q_ij, posx_diff, posy_diff, posz_diff + cdef np.float64_t q_ij, posx_diff, posy_diff, posz_diff, px, py, pz cdef np.float64_t x, y, z, dx, dy, dz, idx, idy, idz, h_j3, h_j2, h_j, ih_j cdef int index, i, j, k + cdef np.float64_t period_x, period_y, period_z + + cdef int xiter[2] + cdef int yiter[2] + cdef int ziter[2] + cdef np.float64_t xiterv[2] + cdef np.float64_t yiterv[2] + cdef np.float64_t ziterv[2] + + xiter[0] = yiter[0] = ziter[0] = 0 + xiterv[0] = yiterv[0] = ziterv[0] = 0.0 + + if period is not None: + period_x = period[0] + period_y = period[1] + period_z = period[2] xsize, ysize, zsize = buff.shape[0], buff.shape[1], buff.shape[2] x_min = bounds[0] @@ -1402,61 +1460,100 @@ def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff, pbar.update(50000) PyErr_CheckSignals() - x0 = ( (posx[j] - hsml[j] - x_min) * idx) - x1 = ( (posx[j] + hsml[j] - x_min) * idx) - x0 = iclip(x0-1, 0, xsize) - x1 = iclip(x1+1, 0, xsize) - - y0 = ( (posy[j] - hsml[j] - y_min) * idy) - y1 = ( (posy[j] + hsml[j] - y_min) * idy) - y0 = iclip(y0-1, 0, ysize) - y1 = iclip(y1+1, 0, ysize) - - z0 = ( (posz[j] - hsml[j] - z_min) * idz) - z1 = ( (posz[j] + hsml[j] - z_min) 
* idz) - z0 = iclip(z0-1, 0, zsize) - z1 = iclip(z1+1, 0, zsize) - - h_j3 = fmax(hsml[j]*hsml[j]*hsml[j], dx*dy*dz) - h_j = math.cbrt(h_j3) - h_j2 = h_j*h_j - ih_j = 1/h_j - - prefactor_j = pmass[j] / pdens[j] / hsml[j]**3 - prefactor_j *= quantity_to_smooth[j] - - # Now we know which voxels to deposit onto for this particle, - # so loop over them and add this particle's contribution - for xi in range(x0, x1): - x = (xi + 0.5) * dx + x_min - - posx_diff = posx[j] - x - posx_diff = posx_diff * posx_diff - if posx_diff > h_j2: - continue - - for yi in range(y0, y1): - y = (yi + 0.5) * dy + y_min - - posy_diff = posy[j] - y - posy_diff = posy_diff * posy_diff - if posy_diff > h_j2: - continue - - for zi in range(z0, z1): - z = (zi + 0.5) * dz + z_min + xiter[1] = yiter[1] = ziter[1] = 999 - posz_diff = posz[j] - z - posz_diff = posz_diff * posz_diff - if posz_diff > h_j2: - continue + px = posx[j] + py = posy[j] + pz = posz[j] - # see equation 4 of the SPLASH paper - q_ij = math.sqrt(posx_diff + posy_diff + posz_diff) * ih_j - if q_ij >= 1: - continue + if check_period == 1: + if px - hsml[j] < x_min: + xiter[1] = +1 + xiterv[1] = period_x + elif px + hsml[j] > x_max: + xiter[1] = -1 + xiterv[1] = -period_x + if py - hsml[j] < y_min: + yiter[1] = +1 + yiterv[1] = period_y + elif py + hsml[j] > y_max: + yiter[1] = -1 + yiterv[1] = -period_y + if pz - hsml[j] < z_min: + ziter[1] = +1 + ziterv[1] = period_z + elif pz + hsml[j] > z_max: + ziter[1] = -1 + ziterv[1] = -period_z - buff[xi, yi, zi] += prefactor_j * kernel_func(q_ij) + for xi in range(2): + if xiter[xi] == 999: continue + px += xiterv[xi] + if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue + for yi in range(2): + if yiter[yi] == 999: continue + py += yiterv[yi] + if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue + for zi in range(2): + if ziter[zi] == 999: continue + pz += ziterv[zi] + if (pz + hsml[j] < z_min) or (pz - hsml[j] > z_max): continue + + x0 = ( (px - hsml[j] - x_min) * idx) + x1 = ( (px + hsml[j] - x_min) * idx) + x0 = iclip(x0-1, 0, xsize) + x1 = iclip(x1+1, 0, xsize) + + y0 = ( (py - hsml[j] - y_min) * idy) + y1 = ( (py + hsml[j] - y_min) * idy) + y0 = iclip(y0-1, 0, ysize) + y1 = iclip(y1+1, 0, ysize) + + z0 = ( (pz - hsml[j] - z_min) * idz) + z1 = ( (pz + hsml[j] - z_min) * idz) + z0 = iclip(z0-1, 0, zsize) + z1 = iclip(z1+1, 0, zsize) + + h_j3 = fmax(hsml[j]*hsml[j]*hsml[j], dx*dy*dz) + h_j = math.cbrt(h_j3) + h_j2 = h_j*h_j + ih_j = 1/h_j + + prefactor_j = pmass[j] / pdens[j] / hsml[j]**3 + prefactor_j *= quantity_to_smooth[j] + + # Now we know which voxels to deposit onto for this particle, + # so loop over them and add this particle's contribution + for xi in range(x0, x1): + x = (xi + 0.5) * dx + x_min + + posx_diff = px - x + posx_diff = posx_diff * posx_diff + if posx_diff > h_j2: + continue + + for yi in range(y0, y1): + y = (yi + 0.5) * dy + y_min + + posy_diff = py - y + posy_diff = posy_diff * posy_diff + if posy_diff > h_j2: + continue + + for zi in range(z0, z1): + z = (zi + 0.5) * dz + z_min + + posz_diff = pz - z + posz_diff = posz_diff * posz_diff + if posz_diff > h_j2: + continue + + # see equation 4 of the SPLASH paper + q_ij = math.sqrt(posx_diff + posy_diff + posz_diff) * ih_j + if q_ij >= 1: + continue + + buff[xi, yi, zi] += prefactor_j * kernel_func(q_ij) def pixelize_element_mesh_line(np.ndarray[np.float64_t, ndim=2] coords, np.ndarray[np.int64_t, ndim=2] conn, From 4bd77f00766532b481bc8b1c644b48c7cc803b2c Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Sat, 25 Jul 2020 
11:40:01 -0400 Subject: [PATCH 517/653] Running black --- .../construction_data_containers.py | 6 +++--- .../coordinates/cartesian_coordinates.py | 20 ++++++++++++++----- 2 files changed, 18 insertions(+), 8 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 88dc1eb237a..26c2899aedc 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -872,7 +872,7 @@ def _fill_sph_particles(self, fields): bounds, size = self._get_grid_bounds_size() period = self.ds.coordinates.period.copy() - if hasattr(period, 'in_units'): + if hasattr(period, "in_units"): period = period.in_units("code_length").d # TODO maybe there is a better way of handling this periodic = any(self.ds.periodicity) @@ -909,7 +909,7 @@ def _fill_sph_particles(self, fields): bounds, pbar=pbar, check_period=int(periodic), - period=period + period=period, ) if normalize: pixelize_sph_kernel_arbitrary_grid( @@ -924,7 +924,7 @@ def _fill_sph_particles(self, fields): bounds, pbar=pbar, check_period=int(periodic), - period=period + period=period, ) if normalize: diff --git a/yt/geometry/coordinates/cartesian_coordinates.py b/yt/geometry/coordinates/cartesian_coordinates.py index 2d8329dd14b..be527fd40e1 100644 --- a/yt/geometry/coordinates/cartesian_coordinates.py +++ b/yt/geometry/coordinates/cartesian_coordinates.py @@ -365,7 +365,9 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, check_period=int(periodic), period=period + bnds, + check_period=int(periodic), + period=period, ) # We use code length here, but to get the path length right # we need to multiply by the conversion factor between @@ -392,7 +394,9 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, check_period=int(periodic), period=period, + bnds, + check_period=int(periodic), + period=period, weight_field=chunk[weight].in_units(wounits), ) mylog.info( @@ -411,7 +415,9 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[weight].in_units(wounits), - bnds, check_period=int(periodic), period=period, + bnds, + check_period=int(periodic), + period=period, ) normalization_2d_utility(buff, weight_buff) elif isinstance(data_source, YTSlice): @@ -432,7 +438,9 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), chunk[field].in_units(ounits), - bnds, check_period=int(periodic), period=period + bnds, + check_period=int(periodic), + period=period, ) if normalize: pixelize_sph_kernel_slice( @@ -443,7 +451,9 @@ def _ortho_pixelize( chunk[ptype, "mass"].to("code_mass"), chunk[ptype, "density"].to("code_density"), np.ones(chunk[ptype, "density"].shape[0]), - bnds, check_period=int(periodic), period=period + bnds, + check_period=int(periodic), + period=period, ) if normalize: From f57b7c71eafec5ab1ee39091574a169c493c2a21 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Sat, 25 Jul 2020 11:41:44 -0400 Subject: [PATCH 518/653] Fix whitespace --- yt/utilities/lib/pixelization_routines.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index 86ef8e27437..c98d7623280 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ 
b/yt/utilities/lib/pixelization_routines.pyx @@ -1729,7 +1729,7 @@ def off_axis_projection_SPH(np.float64_t[:] px, quantity_to_smooth, [rot_bounds_x0, rot_bounds_x1, rot_bounds_y0, rot_bounds_y1], - weight_field=weight_field, + weight_field=weight_field, check_period=0) From 377d218e6d900a6f29181abc8533dba94d87dfc2 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Sat, 25 Jul 2020 12:44:23 -0400 Subject: [PATCH 519/653] These need to be copies, otherwise they get overwritten in the region --- yt/data_objects/selection_data_containers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/selection_data_containers.py b/yt/data_objects/selection_data_containers.py index 4c501862e5c..f2a8992bc05 100644 --- a/yt/data_objects/selection_data_containers.py +++ b/yt/data_objects/selection_data_containers.py @@ -773,7 +773,7 @@ def _get_bbox(self): """ Return the minimum bounding box for the region. """ - return self.left_edge, self.right_edge + return self.left_edge.copy(), self.right_edge.copy() class YTDataCollection(YTSelectionContainer3D): From d722e0b3241e23ce0cdc0a91ae0ecaa9e1175cf9 Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Wed, 5 Aug 2020 23:08:40 -0400 Subject: [PATCH 520/653] Bump test numbers --- tests/tests.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index f8a34668ecf..b96a170c014 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -12,7 +12,7 @@ answer_tests: - yt/frontends/amrvac/tests/test_outputs.py:test_riemann_cartesian_175D - yt/frontends/amrvac/tests/test_outputs.py:test_rmi_cartesian_dust_2D - local_arepo_005: + local_arepo_006: - yt/frontends/arepo/tests/test_outputs.py:test_arepo_bullet - yt/frontends/arepo/tests/test_outputs.py:test_arepo_tng59 - yt/frontends/arepo/tests/test_outputs.py:test_index_override @@ -59,7 +59,7 @@ answer_tests: - yt/frontends/flash/tests/test_outputs.py:test_wind_tunnel - yt/frontends/flash/tests/test_outputs.py:test_fid_1to3_b1 - local_gadget_003: + local_gadget_004: - yt/frontends/gadget/tests/test_outputs.py:test_iso_collapse - yt/frontends/gadget/tests/test_outputs.py:test_pid_uniqueness - yt/frontends/gadget/tests/test_outputs.py:test_bigendian_field_access @@ -73,7 +73,7 @@ answer_tests: local_gdf_001: - yt/frontends/gdf/tests/test_outputs.py:test_sedov_tunnel - local_gizmo_004: + local_gizmo_005: - yt/frontends/gizmo/tests/test_outputs.py:test_gizmo_64 local_halos_009: @@ -82,7 +82,7 @@ answer_tests: - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5 - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42 - local_owls_004: + local_owls_005: - yt/frontends/owls/tests/test_outputs.py:test_snapshot_033 - yt/frontends/owls/tests/test_outputs.py:test_OWLS_particlefilter @@ -94,7 +94,7 @@ answer_tests: - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers - yt/visualization/tests/test_raw_field_slices.py:test_raw_field_slices - local_tipsy_005: + local_tipsy_006: - yt/frontends/tipsy/tests/test_outputs.py:test_pkdgrav - yt/frontends/tipsy/tests/test_outputs.py:test_gasoline_dmonly - yt/frontends/tipsy/tests/test_outputs.py:test_tipsy_galaxy From 2b6c787c0ef7d54929c5cf83f589c3bb931cbd6f Mon Sep 17 00:00:00 2001 From: John ZuHone Date: Fri, 14 Aug 2020 11:11:42 -0400 Subject: [PATCH 521/653] Make this simpler --- yt/data_objects/construction_data_containers.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py 
b/yt/data_objects/construction_data_containers.py index 26c2899aedc..18da6d4b35d 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -875,7 +875,7 @@ def _fill_sph_particles(self, fields): if hasattr(period, "in_units"): period = period.in_units("code_length").d # TODO maybe there is a better way of handling this - periodic = any(self.ds.periodicity) + is_periodic = int(any(self.ds.periodicity)) if smoothing_style == "scatter": for field in fields: @@ -908,7 +908,7 @@ def _fill_sph_particles(self, fields): field_quantity, bounds, pbar=pbar, - check_period=int(periodic), + check_period=is_periodic, period=period, ) if normalize: @@ -923,7 +923,7 @@ def _fill_sph_particles(self, fields): np.ones(dens.shape[0]), bounds, pbar=pbar, - check_period=int(periodic), + check_period=is_periodic, period=period, ) From 89f245f454d3f5d9b65129c3a7e619b99852b0ce Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 13:58:09 +0200 Subject: [PATCH 522/653] remove linting stages from travis --- .travis.yml | 74 ++++++++++++----------------------------------------- 1 file changed, 17 insertions(+), 57 deletions(-) diff --git a/.travis.yml b/.travis.yml index f56a39376f4..2e7540231af 100644 --- a/.travis.yml +++ b/.travis.yml @@ -51,61 +51,26 @@ install: $PIP install --upgrade setuptools echo "Travis build stage: $TRAVIS_BUILD_STAGE_NAME" # install dependencies yt - if [[ ${TRAVIS_BUILD_STAGE_NAME} == "lint" ]]; then - export TRAVIS_BUILD_STAGE_NAME="Lint" - fi - if [[ ${TRAVIS_BUILD_STAGE_NAME} != "Lint" ]]; then - if [[ $MINIMAL == 1 ]]; then - # Ensure numpy and cython are installed so dependencies that need to be built - # don't error out - # The first numpy to support py3.6 is 1.12, but numpy 1.13 matches - # unyt so we'll match it here. - $PIP install numpy==1.13.3 cython==0.26.1 - $PIP install -r tests/test_minimal_requirements.txt - else - # Getting cartopy installed requires getting cython and numpy installed - # first; this is potentially going to be fixed with the inclusion of - # pyproject.toml in cartopy. - # These versions are pinned, so we will need to update/remove them when - # the hack is no longer necessary. - $PIP install numpy==1.18.1 cython==0.29.14 - CFLAGS="$CFLAGS -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H" $PIP install -r tests/test_requirements.txt - fi - $PIP install -e . + if [[ $MINIMAL == 1 ]]; then + # Ensure numpy and cython are installed so dependencies that need to be built + # don't error out + # The first numpy to support py3.6 is 1.12, but numpy 1.13 matches + # unyt so we'll match it here. + $PIP install numpy==1.13.3 cython==0.26.1 + $PIP install -r tests/test_minimal_requirements.txt else - $PIP install -r tests/lint_requirements.txt + # Getting cartopy installed requires getting cython and numpy installed + # first; this is potentially going to be fixed with the inclusion of + # pyproject.toml in cartopy. + # These versions are pinned, so we will need to update/remove them when + # the hack is no longer necessary. + $PIP install numpy==1.18.1 cython==0.29.14 + CFLAGS="$CFLAGS -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H" $PIP install -r tests/test_requirements.txt fi + $PIP install -e . jobs: include: - - stage: Lint - name: "flake8" - python: 3.6 - script: | - flake8 --version - flake8 yt/ - - - stage: Lint - name: "isort" - python: 3.6 - script: | - isort --version - isort . 
--check --diff - - - stage: Lint - name: "black" - python: 3.6 - script: | - black --version - black --check yt/ - - - stage: Lint - name: "flynt" - python: 3.6 - script: | - flynt --version - flynt yt/ --fail-on-change --dry-run -e yt/extern - - stage: tests name: "Python: 3.6 Minimal Dependency Unit Tests" python: 3.6 @@ -143,10 +108,5 @@ jobs: script: nosetests -c nose_unit.cfg after_success: - - | - if [[ ${TRAVIS_BUILD_STAGE_NAME} == "lint" ]]; then - export TRAVIS_BUILD_STAGE_NAME="Lint" - fi - if [[ ${TRAVIS_BUILD_STAGE_NAME} != "Lint" ]]; then - codecov - fi + - codecov + From 3fbc701ba22fef822ae6435b9e16b0566419ef18 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 18:59:16 +0200 Subject: [PATCH 523/653] fix flake8 --- yt/loaders.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/loaders.py b/yt/loaders.py index 2212ddeca47..fecda01bed2 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -70,7 +70,8 @@ def load(fn, *args, **kwargs): return DatasetSeries(fn, *args, **kwargs) - # Unless the dataset starts with http, look for it using the path or relative to the data dir (in this order). + # Unless the dataset starts with http + # look for it using the path or relative to the data dir (in this order). if not (os.path.exists(fn) or fn.startswith("http")): data_dir = ytcfg.get("yt", "test_data_dir") alt_fn = os.path.join(data_dir, fn) From 956cbc4bd76d5c6c05b975c1daed9f4d0d41eca8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 21:44:31 +0200 Subject: [PATCH 524/653] fix import --- yt/frontends/ytdata/tests/test_unit.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/yt/frontends/ytdata/tests/test_unit.py b/yt/frontends/ytdata/tests/test_unit.py index 976ce1eb57d..5a697644fbf 100644 --- a/yt/frontends/ytdata/tests/test_unit.py +++ b/yt/frontends/ytdata/tests/test_unit.py @@ -4,8 +4,7 @@ import numpy as np -from yt.convenience import load -from yt.loaders import load_uniform_grid +from yt.loaders import load, load_uniform_grid from yt.testing import ( assert_array_equal, assert_fname, From 4419827463b2d10be7d5eea175e59f6e2c037337 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 22:04:30 +0200 Subject: [PATCH 525/653] [ci skip] missing empty line --- .github/workflows/style-checks.yaml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/style-checks.yaml b/.github/workflows/style-checks.yaml index aa794adfa5e..54cd19b5ad2 100644 --- a/.github/workflows/style-checks.yaml +++ b/.github/workflows/style-checks.yaml @@ -71,6 +71,7 @@ jobs: - name: install id: install run : pip install -r tests/lint_requirements.txt + - name: check id: flynt run: | From 95cc83c437e54f019eb662391980b30bb907c42d Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 14 Aug 2020 16:52:02 -0500 Subject: [PATCH 526/653] Fix stray YTSelectionObject --- yt/frontends/halo_catalog/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 52fae36e5c7..6a06dbb62d4 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -5,7 +5,7 @@ import numpy as np -from yt.data_objects.data_containers import YTSelectionContainer +from yt.data_objects.selection_objects.base_objects import YTSelectionContainer from yt.data_objects.static_output import ( ParticleDataset, ParticleFile, From 
886e83380cc67863566f7a2da9136de2dc558b83 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 09:24:29 +0200 Subject: [PATCH 527/653] remove deprecated test file --- yt/geometry/tests/test_neighbor_search.py | 43 ----------------------- 1 file changed, 43 deletions(-) delete mode 100644 yt/geometry/tests/test_neighbor_search.py diff --git a/yt/geometry/tests/test_neighbor_search.py b/yt/geometry/tests/test_neighbor_search.py deleted file mode 100644 index 426eaaed198..00000000000 --- a/yt/geometry/tests/test_neighbor_search.py +++ /dev/null @@ -1,43 +0,0 @@ -import numpy as np - -from yt.fields.particle_fields import add_nearest_neighbor_field -from yt.testing import assert_array_almost_equal, assert_equal, fake_particle_ds - - -def test_neighbor_search(): - # skip for now, in principle we can reimplement this in the demeshening - import nose - - raise nose.SkipTest - np.random.seed(0x4D3D3D3) - ds = fake_particle_ds(npart=16 ** 3) - ds.periodicity = (True, True, True) - ds.index - (fn,) = add_nearest_neighbor_field("all", "particle_position", ds) - dd = ds.all_data() - nearest_neighbors = dd[fn] - pos = dd["particle_position"] - all_neighbors = np.zeros_like(nearest_neighbors) - any_eq = np.zeros(pos.shape[0], dtype="bool") - min_in = np.zeros(pos.shape[0], dtype="int64") - for i in range(pos.shape[0]): - dd.set_field_parameter("center", pos[i, :]) - # radius = dd["particle_radius"] - # radius.sort() - r2 = (pos[:, 0] * pos[:, 0]) * 0 - for j in range(3): - DR = pos[i, j] - pos[:, j] - DRo = DR.copy() - DR[DRo > ds.domain_width[j] / 2.0] -= ds.domain_width[j] - DR[DRo < -ds.domain_width[j] / 2.0] += ds.domain_width[j] - r2 += DR * DR - radius = np.sqrt(r2) - radius.sort() - assert radius[0] == 0.0 - all_neighbors[i] = radius[63] - any_eq[i] = np.any(np.abs(radius - nearest_neighbors[i]) < 1e-7) - min_in[i] = np.argmin(np.abs(radius - nearest_neighbors[i])) - # if i == 34: raise RuntimeError - # dd.field_data.pop(("all", "particle_radius")) - assert_equal((min_in == 63).sum(), min_in.size) - assert_array_almost_equal(nearest_neighbors, all_neighbors) From 62b7cc0b27f5a75f2cd464f9ef8cdeab8d229aac Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 11:48:54 +0200 Subject: [PATCH 528/653] expose ytLogger.setLevel with a user facing method yt.set_log_level --- doc/source/faq/index.rst | 19 +++++++------ doc/source/reference/configuration.rst | 2 +- yt/__init__.py | 2 ++ yt/utilities/logger.py | 37 +++++++++++++++++++++++--- 4 files changed, 45 insertions(+), 15 deletions(-) diff --git a/doc/source/faq/index.rst b/doc/source/faq/index.rst index 126061b4fe5..d8af617d3ba 100644 --- a/doc/source/faq/index.rst +++ b/doc/source/faq/index.rst @@ -423,12 +423,12 @@ behavior in yt-3.0. How can I change yt's log level? ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ -yt's default log level is ``INFO``. However, you may want less voluminous logging, especially -if you are in an IPython notebook or running a long or parallel script. On the other -hand, you may want it to output a lot more, since you can't figure out exactly what's going -wrong, and you want to output some debugging information. The yt log level can be -changed using the :ref:`configuration-file`, either by setting it in the -``$HOME/.config/yt/ytrc`` file: +yt's default log level is ``INFO``. However, you may want less voluminous logging, +especially if you are in an IPython notebook or running a long or parallel script. 
+On the other hand, you may want it to output a lot more, since you can't figure out +exactly what's going wrong, and you want to output some debugging information. +The default yt log level can be changed using the :ref:`configuration-file`, +either by setting it in the ``$HOME/.config/yt/ytrc`` file: .. code-block:: bash @@ -438,11 +438,10 @@ which would produce debug (as well as info, warning, and error) messages, or at .. code-block:: python - from yt.funcs import mylog - mylog.setLevel(40) # This sets the log level to "ERROR" + yt.set_log_level("error") -which in this case would suppress everything below error messages. For reference, the numerical -values corresponding to different log levels are: +which in this case would suppress everything below error messages. For reference, +the numerical values corresponding to different log levels are: .. csv-table:: :header: Level, Numeric Value diff --git a/doc/source/reference/configuration.rst b/doc/source/reference/configuration.rst index 7d581e19997..fb0fee253fe 100644 --- a/doc/source/reference/configuration.rst +++ b/doc/source/reference/configuration.rst @@ -63,7 +63,7 @@ Here is an example script, where we adjust the logging at startup: .. code-block:: python import yt - yt.funcs.mylog.setLevel(1) + yt.set_log_level(1) ds = yt.load("my_data0001") ds.print_stats() diff --git a/yt/__init__.py b/yt/__init__.py index 9c69e4fb576..d71bb1c0b81 100644 --- a/yt/__init__.py +++ b/yt/__init__.py @@ -39,6 +39,8 @@ toggle_interactivity, ) from yt.utilities.logger import ytLogger as mylog +from yt.utilities.logger import set_log_level + import yt.utilities.physical_constants as physical_constants import yt.units as units diff --git a/yt/utilities/logger.py b/yt/utilities/logger.py index b83b2eaaa24..cdee35f6ad7 100644 --- a/yt/utilities/logger.py +++ b/yt/utilities/logger.py @@ -30,7 +30,37 @@ def new(*args): return new -level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50) +def set_log_level(level): + """ + Select which minimal logging level should be displayed. + + Parameters + ---------- + level: int or str + Possible values by increasing level: + 0 or "notset" + 1 or "all" + 10 or "debug" + 20 or "info" + 30 or "warning" + 40 or "error" + 50 or "critical" + """ + # this is a user-facing interface to avoid importing from yt.utilities in user code. 
+ if isinstance(level, str): + level = { + "notset": 0, + "all": 1, + "debug": 10, + "info": 20, + "warning": 30, + "error": 40, + "critical": 50, + }[level.lower()] + ytLogger.setLevel(level) + ytLogger.debug("Set log level to %d", level) + + ufstring = "%(name)-3s: [%(levelname)-9s] %(asctime)s %(message)s" cfstring = "%(name)-3s: [%(levelname)-18s] %(asctime)s %(message)s" @@ -76,12 +106,11 @@ def uncolorize_logging(): yt_sh.setFormatter(formatter) # add the handler to the logger ytLogger.addHandler(yt_sh) - ytLogger.setLevel(level) + level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50) + set_log_level(level) ytLogger.propagate = False original_emitter = yt_sh.emit if ytcfg.getboolean("yt", "coloredlogs"): colorize_logging() - -ytLogger.debug("Set log level to %s", level) From 00b91fb41944b523d2fbb5020deee7fb73792f2f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 12:30:55 +0200 Subject: [PATCH 529/653] simplify implementation and add tests --- yt/utilities/logger.py | 14 +++++--------- yt/utilities/tests/test_set_log_level.py | 19 +++++++++++++++++++ 2 files changed, 24 insertions(+), 9 deletions(-) create mode 100644 yt/utilities/tests/test_set_log_level.py diff --git a/yt/utilities/logger.py b/yt/utilities/logger.py index cdee35f6ad7..a189799f8a4 100644 --- a/yt/utilities/logger.py +++ b/yt/utilities/logger.py @@ -47,16 +47,12 @@ def set_log_level(level): 50 or "critical" """ # this is a user-facing interface to avoid importing from yt.utilities in user code. + if isinstance(level, str): - level = { - "notset": 0, - "all": 1, - "debug": 10, - "info": 20, - "warning": 30, - "error": 40, - "critical": 50, - }[level.lower()] + level = level.upper() + + if level == "ALL": # non-standard alias + level = 1 ytLogger.setLevel(level) ytLogger.debug("Set log level to %d", level) diff --git a/yt/utilities/tests/test_set_log_level.py b/yt/utilities/tests/test_set_log_level.py new file mode 100644 index 00000000000..7182a5eb1d6 --- /dev/null +++ b/yt/utilities/tests/test_set_log_level.py @@ -0,0 +1,19 @@ +from yt.testing import assert_raises +from yt.utilities.logger import set_log_level + + +def test_valid_level(): + # test a subset of valid entries to cover + # - case-insensitivity + # - integer values + # - "all" alias, which isn't standard + for lvl in ("all", "ALL", 10, 42, "info", "warning", "ERROR", "CRITICAL"): + set_log_level(lvl) + + +def test_invalid_level(): + # these are the exceptions raised by logging.Logger.setLog + # since they are perfectly clear and readable, we check that nothing else + # happens in the wrapper + assert_raises(TypeError, set_log_level, 1.5) + assert_raises(ValueError, set_log_level, "invalid_level") From 1901f80d7fd94f38d265d261af7f71b0cd67d360 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 13:33:50 +0200 Subject: [PATCH 530/653] fix a breakage in mods --- yt/mods.py | 4 ++-- yt/utilities/logger.py | 5 +++-- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/yt/mods.py b/yt/mods.py index 6654ef49561..2183f097571 100644 --- a/yt/mods.py +++ b/yt/mods.py @@ -12,12 +12,12 @@ import yt.startup_tasks as __startup_tasks from yt import * from yt.config import ytcfg, ytcfg_defaults -from yt.utilities.logger import level as __level +from yt.utilities.logger import _level unparsed_args = __startup_tasks.unparsed_args -if __level >= int(ytcfg_defaults["loglevel"]): +if _level >= int(ytcfg_defaults["loglevel"]): # This won't get displayed. 
mylog.debug("Turning off NumPy error reporting") np.seterr(all="ignore") diff --git a/yt/utilities/logger.py b/yt/utilities/logger.py index a189799f8a4..e3ab9eb2722 100644 --- a/yt/utilities/logger.py +++ b/yt/utilities/logger.py @@ -93,6 +93,8 @@ def uncolorize_logging(): pass +_level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50) + if ytcfg.getboolean("yt", "suppressStreamLogging"): disable_stream_logging() else: @@ -102,8 +104,7 @@ def uncolorize_logging(): yt_sh.setFormatter(formatter) # add the handler to the logger ytLogger.addHandler(yt_sh) - level = min(max(ytcfg.getint("yt", "loglevel"), 0), 50) - set_log_level(level) + set_log_level(_level) ytLogger.propagate = False original_emitter = yt_sh.emit From 70cfa4659d9577ec9779b3be24fa8a73fd6e2aa0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 16:09:23 +0200 Subject: [PATCH 531/653] cleanup: use 'raise from' in wrapped exceptions. Also fix some broad error catching. --- yt/funcs.py | 4 ++-- yt/testing.py | 8 ++++---- yt/utilities/lib/pixelization_routines.pyx | 1 + yt/visualization/color_maps.py | 4 ++-- yt/visualization/image_writer.py | 4 ++-- yt/visualization/plot_modifications.py | 4 ++-- yt/visualization/plot_window.py | 4 ++-- 7 files changed, 15 insertions(+), 14 deletions(-) diff --git a/yt/funcs.py b/yt/funcs.py index f494e792f92..0f10761280b 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -41,7 +41,7 @@ def iterable(obj): """ try: len(obj) - except Exception: + except TypeError: return False return True @@ -1187,7 +1187,7 @@ def get_hash(infile, algorithm="md5", BLOCKSIZE=65536): try: hasher = getattr(hashlib, algorithm)() - except Exception: + except AttributeError as e: raise NotImplementedError( f"'{algorithm}' not available! Available algorithms: {hashlib.algorithms}" ) diff --git a/yt/testing.py b/yt/testing.py index c341e1a0253..4091247bf8c 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -1133,11 +1133,11 @@ def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs): try: des = des.in_units(act.units) - except UnitOperationError: + except UnitOperationError as e: raise AssertionError( "Units of actual (%s) and desired (%s) do not have " "equivalent dimensions" % (act.units, des.units) - ) + ) from e rt = YTArray(rtol) if not rt.units.is_dimensionless: @@ -1148,11 +1148,11 @@ def assert_allclose_units(actual, desired, rtol=1e-7, atol=0, **kwargs): try: at = at.in_units(act.units) - except UnitOperationError: + except UnitOperationError as e: raise AssertionError( "Units of atol (%s) and actual (%s) do not have " "equivalent dimensions" % (at.units, act.units) - ) + ) from e # units have been validated, so we strip units before calling numpy # to avoid spurious errors diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index c98d7623280..314fe449937 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -1555,6 +1555,7 @@ def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff, buff[xi, yi, zi] += prefactor_j * kernel_func(q_ij) + def pixelize_element_mesh_line(np.ndarray[np.float64_t, ndim=2] coords, np.ndarray[np.int64_t, ndim=2] conn, np.ndarray[np.float64_t, ndim=1] start_point, diff --git a/yt/visualization/color_maps.py b/yt/visualization/color_maps.py index d79365c2019..da0977deb02 100644 --- a/yt/visualization/color_maps.py +++ b/yt/visualization/color_maps.py @@ -589,12 +589,12 @@ def show_colormaps(subset="all", filename=None): maps = [m for m in 
plt.colormaps() if m in subset] if len(maps) == 0: raise AttributeError - except AttributeError: + except AttributeError as e: raise AttributeError( "show_colormaps requires subset attribute " "to be 'all', 'yt_native', or a list of " "valid colormap names." - ) + ) from e maps = sorted(set(maps)) # scale the image size by the number of cmaps plt.figure(figsize=(2.0 * len(maps) / 10.0, 6)) diff --git a/yt/visualization/image_writer.py b/yt/visualization/image_writer.py index 7244c979775..4eb44fe9742 100644 --- a/yt/visualization/image_writer.py +++ b/yt/visualization/image_writer.py @@ -254,7 +254,7 @@ def apply_colormap(image, color_bounds=None, cmap_name=None, func=lambda x: x): def map_to_colors(buff, cmap_name): try: lut = cmd.color_map_luts[cmap_name] - except KeyError: + except KeyError as e: try: # if cmap is tuple, then we're using palettable or brewer2mpl cmaps if isinstance(cmap_name, tuple): @@ -267,7 +267,7 @@ def map_to_colors(buff, cmap_name): raise KeyError( "Your color map (%s) was not found in either the extracted" " colormap file or matplotlib colormaps" % cmap_name - ) + ) from e if isinstance(cmap_name, tuple): # If we are using the colorbrewer maps, don't interpolate diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index 0eb51d19d31..b144cae7226 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -951,13 +951,13 @@ def __call__(self, plot): try: linewidth *= mask self.plot_args["linewidth"] = linewidth - except ValueError: + except ValueError as e: err_msg = ( "Error applying display threshold: linewidth" + "must have shape ({}, {}) or be scalar" ) err_msg = err_msg.format(nx, ny) - raise ValueError(err_msg) + raise ValueError(err_msg) from e else: field_colors = None diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 527a2f98905..78557c4d3bd 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -768,8 +768,8 @@ def set_axes_unit(self, unit_name): for un in unit_name: try: self.ds.length_unit.in_units(un) - except (UnitConversionError, UnitParseError): - raise YTUnitNotRecognized(un) + except (UnitConversionError, UnitParseError) as e: + raise YTUnitNotRecognized(un) from e self._axes_unit_names = unit_name return self From dc8f7de1f06b4653294aea2a1768a441dcba9002 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 16:33:31 +0200 Subject: [PATCH 532/653] refactor simplify YTPlotCallbackError traceback chaining --- yt/utilities/exceptions.py | 6 ++---- yt/visualization/plot_window.py | 3 +-- 2 files changed, 3 insertions(+), 6 deletions(-) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index b610e167773..30967f9e326 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -594,13 +594,11 @@ def __str__(self): class YTPlotCallbackError(Exception): - def __init__(self, callback, error): + def __init__(self, callback): self.callback = "annotate_" + callback - self.error = error def __str__(self): - msg = "%s callback failed with the following error: %s" - return msg % (self.callback, self.error) + return "%s callback failed" % self.callback class YTPixelizeError(YTException): diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 78557c4d3bd..3baca50c2ae 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -1276,8 +1276,7 @@ def run_callbacks(self): except YTDataTypeUnsupported as 
e: raise e except Exception as e: - new_exc = YTPlotCallbackError(callback._type_name, e) - raise new_exc.with_traceback(sys.exc_info()[2]) + raise YTPlotCallbackError(callback._type_name) from e for key in self.frb.keys(): if key not in keys: del self.frb[key] From 1896572c10716e035cb6a5d8fb46b86f949fb889 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 16:40:39 +0200 Subject: [PATCH 533/653] cleanup: remove useless pass statement --- doc/extensions/pythonscript_sphinxext.py | 1 - 1 file changed, 1 deletion(-) diff --git a/doc/extensions/pythonscript_sphinxext.py b/doc/extensions/pythonscript_sphinxext.py index 9c8ac255645..f322a474417 100644 --- a/doc/extensions/pythonscript_sphinxext.py +++ b/doc/extensions/pythonscript_sphinxext.py @@ -101,4 +101,3 @@ def thread_safe_mkdir(dirname): except OSError as e: if e.errno != errno.EEXIST: raise - pass From 3dcda984a6b1552e5f500c4195487f02ec88c1ef Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 19 Jul 2020 16:42:42 +0200 Subject: [PATCH 534/653] replace with_traceback with raise from --- setupext.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/setupext.py b/setupext.py index 193495aa953..6b20f8b5bc2 100644 --- a/setupext.py +++ b/setupext.py @@ -263,7 +263,7 @@ def get_cpu_count(): raise ValueError( "MAX_BUILD_CORES must be set to an integer. " + "See above for original error." - ).with_traceback(e.__traceback__) + ) from e max_cores = min(cpu_count, user_max_cores) return max_cores @@ -307,12 +307,12 @@ def finalize_options(self): try: import cython import numpy - except ImportError: + except ImportError as e: raise ImportError( """Could not import cython or numpy. Building yt from source requires cython and numpy to be installed. 
Please install these packages using the appropriate package manager for your python environment.""" - ) + ) from e if LooseVersion(cython.__version__) < LooseVersion("0.26.1"): raise RuntimeError( """Building yt from source requires Cython 0.26.1 or newer but From 144d05664f91cd5137c243f795ffd7f05c77bd29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 20 Jul 2020 13:58:32 +0200 Subject: [PATCH 535/653] fix a bunch of wrapped errors --- .../construction_data_containers.py | 4 ++-- yt/data_objects/data_containers.py | 24 +++++++++---------- yt/data_objects/particle_trajectories.py | 4 ++-- yt/data_objects/profiles.py | 4 ++-- yt/data_objects/time_series.py | 4 ++-- yt/fields/xray_emission_fields.py | 23 +++++++++--------- yt/frontends/enzo/data_structures.py | 4 ++-- yt/frontends/enzo_p/io.py | 4 ++-- yt/frontends/exodus_ii/data_structures.py | 4 ++-- yt/frontends/flash/data_structures.py | 8 +++---- yt/frontends/gamer/data_structures.py | 5 +--- yt/frontends/gdf/data_structures.py | 4 ++-- yt/frontends/stream/data_structures.py | 1 - yt/utilities/answer_testing/utils.py | 2 +- yt/utilities/command_line.py | 16 ++++++------- yt/utilities/particle_generator.py | 4 ++-- yt/visualization/plot_modifications.py | 4 ++-- yt/visualization/plot_window.py | 1 - .../interactive_vr_helpers.py | 4 ++-- 19 files changed, 59 insertions(+), 65 deletions(-) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index cf152ab1e60..4de3337126f 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -789,13 +789,13 @@ def get_data(self, fields=None): return try: fill, gen, part, alias = self._split_fields(fields_to_get) - except NeedsGridType: + except NeedsGridType as e: if self._num_ghost_zones == 0: raise RuntimeError( "Attempting to access a field that needs ghost zones, but " "num_ghost_zones = %s. You should create the covering grid " "with nonzero num_ghost_zones." % self._num_ghost_zones - ) + ) from e else: raise diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index fea2377141c..8d32e3a5bf3 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -385,7 +385,7 @@ def _generate_particle_field(self, field): finfo.check_available(gen_obj) except NeedsGridType as ngt_exception: if ngt_exception.ghost_zones != 0: - raise NotImplementedError + raise NotImplementedError from ngt_exception size = self._count_particles(ftype) rv = self.ds.arr(np.empty(size, dtype="float64"), finfo.units) ind = 0 @@ -802,13 +802,13 @@ def create_firefly_object( try: from firefly_api.particlegroup import ParticleGroup from firefly_api.reader import Reader - except ImportError: + except ImportError as e: raise ImportError( "Can't find firefly_api, ensure it" - + "is in your python path or install it with" - + "'$ pip install firefly_api'. It is also available" - + "on github at github.com/agurvich/firefly_api" - ) + "is in your python path or install it with" + "'$ pip install firefly_api'. 
It is also available" + "on github at github.com/agurvich/firefly_api" + ) from e ## handle default arguments fields_to_include = [] if fields_to_include is None else fields_to_include @@ -1746,17 +1746,17 @@ def _generate_fields(self, fields_to_generate): ) try: fd.convert_to_units(fi.units) - except AttributeError: + except AttributeError as e: # If the field returns an ndarray, coerce to a # dimensionless YTArray and verify that field is # supposed to be unitless fd = self.ds.arr(fd, "") if fi.units != "": - raise YTFieldUnitError(fi, fd.units) - except UnitConversionError: - raise YTFieldUnitError(fi, fd.units) - except UnitParseError: - raise YTFieldUnitParseError(fi) + raise YTFieldUnitError(fi, fd.units) from e + except UnitConversionError as e: + raise YTFieldUnitError(fi, fd.units) from e + except UnitParseError as e: + raise YTFieldUnitParseError(fi) from e self.field_data[field] = fd except GenerationInProgress as gip: for f in gip.fields: diff --git a/yt/data_objects/particle_trajectories.py b/yt/data_objects/particle_trajectories.py index 562772667df..5449cab7ef2 100644 --- a/yt/data_objects/particle_trajectories.py +++ b/yt/data_objects/particle_trajectories.py @@ -124,10 +124,10 @@ def __init__( # duplicate. This is due to the fact that the rhs # would then have a different shape as the lhs output_field[indices, i] = pfields[field] - except ValueError: + except ValueError as e: raise YTIllDefinedParticleData( "This dataset contains duplicate particle indices!" - ) + ) from e self.field_data[field] = array_like_field( dd_first, output_field.copy(), fds[field] ) diff --git a/yt/data_objects/profiles.py b/yt/data_objects/profiles.py index 1364e81fbbf..42faad03644 100644 --- a/yt/data_objects/profiles.py +++ b/yt/data_objects/profiles.py @@ -1368,7 +1368,7 @@ def create_profile( bf_units = data_source.ds.field_info[bin_field].output_units try: field_ex = list(extrema[bin_field[-1]]) - except KeyError: + except KeyError as e: try: field_ex = list(extrema[bin_field]) except KeyError: @@ -1376,7 +1376,7 @@ def create_profile( "Could not find field {0} or {1} in extrema".format( bin_field[-1], bin_field ) - ) + ) from e if isinstance(field_ex[0], tuple): field_ex = [data_source.ds.quan(*f) for f in field_ex] diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 0c0635ad65f..b499c7c1d2c 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -167,8 +167,8 @@ def __new__(cls, outputs, *args, **kwargs): ret = super(DatasetSeries, cls).__new__(cls) try: ret._pre_outputs = outputs[:] - except TypeError: - raise YTOutputNotIdentified(outputs) + except TypeError as e: + raise YTOutputNotIdentified(outputs) from e return ret def __init__( diff --git a/yt/fields/xray_emission_fields.py b/yt/fields/xray_emission_fields.py index 900a4cd15bf..3f335705cde 100644 --- a/yt/fields/xray_emission_fields.py +++ b/yt/fields/xray_emission_fields.py @@ -216,11 +216,11 @@ def add_xray_emissivity_field( if not isinstance(metallicity, float) and metallicity is not None: try: metallicity = ds._get_field_info(*metallicity) - except YTFieldNotFound: + except YTFieldNotFound as e: raise RuntimeError( f"Your dataset does not have a {metallicity} field! " + "Perhaps you should specify a constant metallicity instead?" - ) + ) from e if table_type == "cloudy": # Cloudy wants to scale by nH**2 @@ -327,16 +327,17 @@ def _photon_emissivity_field(field, data): ) else: redshift = 0.0 # Only for local sources! 
- if not isinstance(dist, YTQuantity): - try: - dist = ds.quan(dist[0], dist[1]) - except TypeError: - raise RuntimeError( - "Please specifiy 'dist' as a YTQuantity " - "or a (value, unit) tuple!" - ) - else: + try: + # normal behaviour, if dist is a YTQuantity dist = ds.quan(dist.value, dist.units) + except AttributeError as e: + try: + dist = ds.quan(*dist) + except (RuntimeError, TypeError): + raise TypeError( + "dist should be a YTQuantity " "or a (value, unit) tuple!" + ) from e + angular_scale = dist / ds.quan(1.0, "radian") dist_fac = ds.quan( 1.0 / (4.0 * np.pi * dist * dist * angular_scale * angular_scale).v, diff --git a/yt/frontends/enzo/data_structures.py b/yt/frontends/enzo/data_structures.py index d1dfdf29b2d..cc80e8d6722 100644 --- a/yt/frontends/enzo/data_structures.py +++ b/yt/frontends/enzo/data_structures.py @@ -413,8 +413,8 @@ def _detect_output_fields(self): continue try: gf = self.io._read_field_names(grid) - except self.io._read_exception: - raise IOError("Grid %s is a bit funky?", grid.id) + except self.io._read_exception as e: + raise IOError("Grid %s is a bit funky?", grid.id) from e mylog.debug("Grid %s has: %s", grid.id, gf) field_list = field_list.union(gf) if "AppendActiveParticleType" in self.dataset.parameters: diff --git a/yt/frontends/enzo_p/io.py b/yt/frontends/enzo_p/io.py index de985408d6c..bec46281dbd 100644 --- a/yt/frontends/enzo_p/io.py +++ b/yt/frontends/enzo_p/io.py @@ -26,12 +26,12 @@ def _read_field_names(self, grid): f = h5py.File(grid.filename, mode="r") try: group = f[grid.block_name] - except KeyError: + except KeyError as e: raise YTException( message="Grid %s is missing from data file %s." % (grid.block_name, grid.filename), ds=self.ds, - ) + ) from e fields = [] ptypes = set() dtypes = set() diff --git a/yt/frontends/exodus_ii/data_structures.py b/yt/frontends/exodus_ii/data_structures.py index 039ba1795b8..fc9a98757de 100644 --- a/yt/frontends/exodus_ii/data_structures.py +++ b/yt/frontends/exodus_ii/data_structures.py @@ -227,10 +227,10 @@ def _get_current_time(self): with self._handle.open_ds() as ds: try: return ds.variables["time_whole"][self.step] - except IndexError: + except IndexError as e: raise RuntimeError( "Invalid step number, max is %d" % (self.num_steps - 1) - ) + ) from e except (KeyError, TypeError): return 0.0 diff --git a/yt/frontends/flash/data_structures.py b/yt/frontends/flash/data_structures.py index 3732b4fed00..fe0bbd9ae1d 100644 --- a/yt/frontends/flash/data_structures.py +++ b/yt/frontends/flash/data_structures.py @@ -193,14 +193,12 @@ def __init__( mylog.info( "Particle file found: %s", self.particle_filename.split("/")[-1] ) - except IOError: + except OSError: self._particle_handle = self._handle else: # particle_filename is specified by user - try: - self._particle_handle = HDF5FileHandler(self.particle_filename) - except Exception: - raise IOError(self.particle_filename) + self._particle_handle = HDF5FileHandler(self.particle_filename) + # Check if the particle file has the same time if self._particle_handle != self._handle: part_time = self._particle_handle.handle.get("real scalars")[0][1] diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index bc8a882264a..86b3df347e7 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -233,10 +233,7 @@ def __init__( if self.particle_filename is None: self._particle_handle = self._handle else: - try: - self._particle_handle = HDF5FileHandler(self.particle_filename) - except Exception: - 
raise IOError(self.particle_filename) + self._particle_handle = HDF5FileHandler(self.particle_filename) # currently GAMER only supports refinement by a factor of 2 self.refine_by = 2 diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index 33f7a7b8389..f01728fb827 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -245,8 +245,8 @@ def _parse_parameter_file(self): geometry = just_one(sp.get("geometry", 0)) try: self.geometry = GEOMETRY_TRANS[geometry] - except KeyError: - raise YTGDFUnknownGeometry(geometry) + except KeyError as e: + raise YTGDFUnknownGeometry(geometry) from e self.parameters.update(sp) self.domain_left_edge = sp["domain_left_edge"][:] self.domain_right_edge = sp["domain_right_edge"][:] diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 8004a2870a3..9e774e80e3b 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -363,7 +363,6 @@ def all_fields(self): fields = list(set(fields)) return fields - class StreamParticleIndex(SPHParticleIndex): def __init__(self, ds, dataset_type=None): self.stream_handler = ds.stream_handler diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index 02ceaef19ed..1f9d802c27e 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -186,7 +186,7 @@ def generate_hash(data): if isinstance(data, dict): hd = _hash_dict(data) else: - raise TypeError + raise return hd diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 51d8664fbee..395a3decd49 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -134,8 +134,8 @@ def _print_installation_information(path): def _get_girder_client(): try: import girder_client - except ImportError: - raise YTCommandRequiresModule("girder_client") + except ImportError as e: + raise YTCommandRequiresModule("girder_client") from e if not ytcfg.get("yt", "hub_api_key"): print("Before you can access the yt Hub you need an API key") print("In order to obtain one, either register by typing:") @@ -797,8 +797,8 @@ class YTHubRegisterCmd(YTCommand): def __call__(self, args): try: import requests - except ImportError: - raise YTCommandRequiresModule("requests") + except ImportError as e: + raise YTCommandRequiresModule("requests") from e if ytcfg.get("yt", "hub_api_key") != "": print("You seem to already have an API key for the hub in") print(f"{CURRENT_CONFIG_FILE} . Delete this if you want to force a") @@ -1045,12 +1045,12 @@ def __call__(self, args): PannableMapServer(p.data_source, args.field, args.takelog, args.cmap) try: import bottle - except ImportError: + except ImportError as e: raise ImportError( "The mapserver functionality requires the bottle " "package to be installed. Please install using `pip " "install bottle`." 
- ) + ) from e bottle.debug(True) if args.host is not None: colonpl = args.host.find(":") @@ -1607,8 +1607,8 @@ class YTUploadFileCmd(YTCommand): def __call__(self, args): try: import requests - except ImportError: - raise YTCommandRequiresModule("requests") + except ImportError as e: + raise YTCommandRequiresModule("requests") from e fs = iter(FileStreamer(open(args.file, "rb"))) upload_url = ytcfg.get("yt", "curldrop_upload_url") diff --git a/yt/utilities/particle_generator.py b/yt/utilities/particle_generator.py index ab3a9b35b04..ffcf0980cbe 100644 --- a/yt/utilities/particle_generator.py +++ b/yt/utilities/particle_generator.py @@ -33,11 +33,11 @@ def __init__(self, ds, num_particles, field_list, ptype="io"): self.posx_index = self.field_list.index(self.default_fields[0]) self.posy_index = self.field_list.index(self.default_fields[1]) self.posz_index = self.field_list.index(self.default_fields[2]) - except Exception: + except Exception as e: raise KeyError( "You must specify position fields: " + " ".join(["particle_position_%s" % ax for ax in "xyz"]) - ) + ) from e self.index_index = self.field_list.index((ptype, "particle_index")) self.num_grids = self.ds.index.num_grids diff --git a/yt/visualization/plot_modifications.py b/yt/visualization/plot_modifications.py index b144cae7226..fd7c5c9302d 100644 --- a/yt/visualization/plot_modifications.py +++ b/yt/visualization/plot_modifications.py @@ -2732,12 +2732,12 @@ def __call__(self, plot): setter_func = "set_" + key try: getattr(fontproperties, setter_func)(val) - except AttributeError: + except AttributeError as e: raise AttributeError( "Cannot set text_args keyword " "to include '%s' because MPL's fontproperties object does " "not contain function '%s'." % (key, setter_func) - ) + ) from e # this "anchors" the size bar to a box centered on self.pos in axis # coordinates diff --git a/yt/visualization/plot_window.py b/yt/visualization/plot_window.py index 3baca50c2ae..2f712db5af4 100644 --- a/yt/visualization/plot_window.py +++ b/yt/visualization/plot_window.py @@ -1,4 +1,3 @@ -import sys import types from collections import defaultdict from distutils.version import LooseVersion diff --git a/yt/visualization/volume_rendering/interactive_vr_helpers.py b/yt/visualization/volume_rendering/interactive_vr_helpers.py index d146f35b127..6335206832c 100644 --- a/yt/visualization/volume_rendering/interactive_vr_helpers.py +++ b/yt/visualization/volume_rendering/interactive_vr_helpers.py @@ -44,11 +44,11 @@ def _render_opengl( try: import cyglfw3 # NOQA import OpenGL.GL # NOQA - except ImportError: + except ImportError as e: raise ImportError( "This functionality requires the cyglfw3 and PyOpenGL " "packages to be installed." 
- ) + ) from e from .interactive_loop import RenderingContext from .interactive_vr import ( From fee0525b4c3159d8a6fff39b6a891c82bbea0a5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 07:53:49 +0200 Subject: [PATCH 536/653] flynting --- yt/utilities/exceptions.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 30967f9e326..17ad18428b8 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -598,7 +598,7 @@ def __init__(self, callback): self.callback = "annotate_" + callback def __str__(self): - return "%s callback failed" % self.callback + return f"{self.callback} callback failed" class YTPixelizeError(YTException): From 26ae796f49bf0f1a8e7cb85a8d1bc3238ca42c98 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 08:56:59 +0200 Subject: [PATCH 537/653] fix a F841 error --- yt/funcs.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/funcs.py b/yt/funcs.py index 0f10761280b..0eb68de22b4 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -1190,7 +1190,7 @@ def get_hash(infile, algorithm="md5", BLOCKSIZE=65536): except AttributeError as e: raise NotImplementedError( f"'{algorithm}' not available! Available algorithms: {hashlib.algorithms}" - ) + ) from e filesize = os.path.getsize(infile) iterations = int(float(filesize) / float(BLOCKSIZE)) From 958732c9a69330fb55e4b3139a99a0651d9d4669 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 17:12:50 +0200 Subject: [PATCH 538/653] fix final error --- yt/loaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index fecda01bed2..2e9aaf3c72a 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -133,8 +133,8 @@ def load_simulation(fn, simulation_type, find_outputs=False): try: cls = simulation_time_series_registry[simulation_type] - except KeyError: - raise YTSimulationNotIdentified(simulation_type) + except KeyError as e: + raise YTSimulationNotIdentified(simulation_type) from e return cls(fn, find_outputs=find_outputs) From f8bd86594921b2c35f9f2bdebdf898e6bf1eb3a0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 08:22:43 +0200 Subject: [PATCH 539/653] deprecate YTOutputNotIdentified in favor of YTUnidentifiedDataType --- doc/source/reference/configuration.rst | 2 +- yt/data_objects/time_series.py | 4 ++-- yt/frontends/enzo/simulation_handling.py | 4 ++-- yt/frontends/exodus_ii/simulation_handling.py | 4 ++-- yt/frontends/gadget/simulation_handling.py | 4 ++-- yt/loaders.py | 6 +++--- yt/tests/test_load_errors.py | 6 +++--- yt/utilities/command_line.py | 4 ++-- yt/utilities/exceptions.py | 16 ++++++++++++++-- 9 files changed, 31 insertions(+), 19 deletions(-) diff --git a/doc/source/reference/configuration.rst b/doc/source/reference/configuration.rst index 7d581e19997..408db6dcce2 100644 --- a/doc/source/reference/configuration.rst +++ b/doc/source/reference/configuration.rst @@ -102,7 +102,7 @@ used internally. setting will provide instructions for setting this. * ``requires_ds_strict`` (default: ``True``): If true, answer tests wrapped with :func:`~yt.utilities.answer_testing.framework.requires_ds` will raise - :class:`~yt.utilities.exceptions.YTOutputNotIdentified` rather than consuming + :class:`~yt.utilities.exceptions.YTUnidentifiedDataType` rather than consuming it if required dataset is not present. 
* ``serialize`` (default: ``False``): If true, perform automatic :ref:`object serialization ` diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 0c0635ad65f..ad2661b61e8 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -12,7 +12,7 @@ from yt.data_objects.particle_trajectories import ParticleTrajectories from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.exceptions import YTException, YTOutputNotIdentified +from yt.utilities.exceptions import YTException, YTUnidentifiedDataType from yt.utilities.object_registries import ( analysis_task_registry, data_object_registry, @@ -168,7 +168,7 @@ def __new__(cls, outputs, *args, **kwargs): try: ret._pre_outputs = outputs[:] except TypeError: - raise YTOutputNotIdentified(outputs) + raise YTUnidentifiedDataType(outputs) return ret def __init__( diff --git a/yt/frontends/enzo/simulation_handling.py b/yt/frontends/enzo/simulation_handling.py index 0408239e7eb..68005607805 100644 --- a/yt/frontends/enzo/simulation_handling.py +++ b/yt/frontends/enzo/simulation_handling.py @@ -13,7 +13,7 @@ InvalidSimulationTimeSeries, MissingParameter, NoStoppingCondition, - YTOutputNotIdentified, + YTUnidentifiedDataType, ) from yt.utilities.logger import ytLogger as mylog from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects @@ -659,7 +659,7 @@ def _check_for_outputs(self, potential_outputs): ) try: ds = load(filename) - except (FileNotFoundError, YTOutputNotIdentified): + except (FileNotFoundError, YTUnidentifiedDataType): mylog.error("Failed to load %s", filename) continue my_storage.result = { diff --git a/yt/frontends/exodus_ii/simulation_handling.py b/yt/frontends/exodus_ii/simulation_handling.py index f751fef8902..47ed8aa1642 100644 --- a/yt/frontends/exodus_ii/simulation_handling.py +++ b/yt/frontends/exodus_ii/simulation_handling.py @@ -3,7 +3,7 @@ from yt.data_objects.time_series import DatasetSeries from yt.funcs import only_on_root from yt.loaders import load -from yt.utilities.exceptions import YTOutputNotIdentified +from yt.utilities.exceptions import YTUnidentifiedDataType from yt.utilities.logger import ytLogger as mylog from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects @@ -91,7 +91,7 @@ def _check_for_outputs(self, potential_outputs): ): try: ds = load(output) - except (FileNotFoundError, YTOutputNotIdentified): + except (FileNotFoundError, YTUnidentifiedDataType): mylog.error("Failed to load %s", output) continue my_storage.result = {"filename": output, "num_steps": ds.num_steps} diff --git a/yt/frontends/gadget/simulation_handling.py b/yt/frontends/gadget/simulation_handling.py index 81ab256c371..fb4994f54a0 100644 --- a/yt/frontends/gadget/simulation_handling.py +++ b/yt/frontends/gadget/simulation_handling.py @@ -13,7 +13,7 @@ InvalidSimulationTimeSeries, MissingParameter, NoStoppingCondition, - YTOutputNotIdentified, + YTUnidentifiedDataType, ) from yt.utilities.logger import ytLogger as mylog from yt.utilities.parallel_tools.parallel_analysis_interface import parallel_objects @@ -522,7 +522,7 @@ def _check_for_outputs(self, potential_outputs): ): try: ds = load(output) - except (FileNotFoundError, YTOutputNotIdentified): + except (FileNotFoundError, YTUnidentifiedDataType): mylog.error("Failed to load %s", output) continue my_storage.result = { diff --git a/yt/loaders.py b/yt/loaders.py index fecda01bed2..7105911fbd7 100644 
--- a/yt/loaders.py +++ b/yt/loaders.py @@ -13,8 +13,8 @@ from yt.utilities.exceptions import ( YTAmbiguousDataType, YTIllDefinedAMR, - YTOutputNotIdentified, YTSimulationNotIdentified, + YTUnidentifiedDataType, ) from yt.utilities.hierarchy_inspection import find_lowest_subclasses from yt.utilities.lib.misc_utilities import get_box_grids_level @@ -57,7 +57,7 @@ def load(fn, *args, **kwargs): FileNotFoundError If fn does not match any existing file or directory. - yt.utilities.exceptions.YTOutputNotIdentified + yt.utilities.exceptions.YTUnidentifiedDataType If fn matches existing files or directories with undetermined format. yt.utilities.exceptions.YTAmbiguousDataType @@ -97,7 +97,7 @@ def load(fn, *args, **kwargs): if len(candidates) > 1: raise YTAmbiguousDataType(fn, candidates) - raise YTOutputNotIdentified(fn, args, kwargs) + raise YTUnidentifiedDataType(fn, args, kwargs) def load_simulation(fn, simulation_type, find_outputs=False): diff --git a/yt/tests/test_load_errors.py b/yt/tests/test_load_errors.py index a874e141a30..ba6bdf5f2e5 100644 --- a/yt/tests/test_load_errors.py +++ b/yt/tests/test_load_errors.py @@ -7,8 +7,8 @@ from yt.testing import assert_raises from yt.utilities.exceptions import ( YTAmbiguousDataType, - YTOutputNotIdentified, YTSimulationNotIdentified, + YTUnidentifiedDataType, ) from yt.utilities.object_registries import output_type_registry @@ -39,8 +39,8 @@ def test_load_unidentified_data(): with tempfile.TemporaryDirectory() as tmpdir: empty_file_path = Path(tmpdir) / "empty_file" empty_file_path.touch() - assert_raises(YTOutputNotIdentified, load, tmpdir) - assert_raises(YTOutputNotIdentified, load, empty_file_path) + assert_raises(YTUnidentifiedDataType, load, tmpdir) + assert_raises(YTUnidentifiedDataType, load, empty_file_path) assert_raises( YTSimulationNotIdentified, load_simulation, diff --git a/yt/utilities/command_line.py b/yt/utilities/command_line.py index 51d8664fbee..56ecf7447e5 100644 --- a/yt/utilities/command_line.py +++ b/yt/utilities/command_line.py @@ -33,7 +33,7 @@ from yt.utilities.exceptions import ( YTCommandRequiresModule, YTFieldNotParseable, - YTOutputNotIdentified, + YTUnidentifiedDataType, ) from yt.utilities.metadata import get_metadata from yt.visualization.plot_window import ProjectionPlot, SlicePlot @@ -1742,7 +1742,7 @@ def __call__(self, args): print("(% 10i/% 10i) Evaluating %s" % (i, len(candidates), c)) try: record = get_metadata(c, args.full_output) - except YTOutputNotIdentified: + except YTUnidentifiedDataType: continue records.append(record) with open(args.output, "w") as f: diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index b610e167773..d952aafa48c 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -13,7 +13,7 @@ def __init__(self, message=None, ds=None): # Data access exceptions: -class YTOutputNotIdentified(YTException): +class YTUnidentifiedDataType(YTException): def __init__(self, filename, args=None, kwargs=None): self.filename = filename self.args = args @@ -29,7 +29,19 @@ def __str__(self): return msg -class YTAmbiguousDataType(YTOutputNotIdentified): +class YTOutputNotIdentified(YTUnidentifiedDataType): + # kept for backwards compatibility + def __init__(self, filename, args=None, kwargs=None): + super(YTUnidentifiedDataType, self).__init__(filename, args, kwargs) + # this cannot be imported at the module level (creates circular imports) + from yt.funcs import issue_deprecation_warning + + issue_deprecation_warning( + "YTOutputNotIdentified is a deprecated alias 
for YTUnidentifiedDataType" + ) + + +class YTAmbiguousDataType(YTUnidentifiedDataType): def __init__(self, filename, candidates): self.filename = filename self.candidates = candidates From 3b0becd560e78eb042188a006afa78314b832f6f Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 6 Aug 2020 18:09:36 +0200 Subject: [PATCH 540/653] raise from --- yt/data_objects/time_series.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index ad2661b61e8..3728ce5e7fb 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -167,8 +167,8 @@ def __new__(cls, outputs, *args, **kwargs): ret = super(DatasetSeries, cls).__new__(cls) try: ret._pre_outputs = outputs[:] - except TypeError: - raise YTUnidentifiedDataType(outputs) + except TypeError as e: + raise YTUnidentifiedDataType(outputs) from e return ret def __init__( From 8f4141e116c862195af5f468acb4e69e1facf358 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 17:19:38 +0200 Subject: [PATCH 541/653] fix lines too long --- yt/data_objects/static_output.py | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 22e2d179594..0bb16ced30b 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1621,14 +1621,15 @@ def add_gradient_fields(self, fields=None, input_field=None): Examples -------- - >>> grad_fields = ds.add_gradient_fields(("gas","temperature")) print(grad_fields) - >>> [('gas', 'temperature_gradient_x'), ('gas', 'temperature_gradient_y'), ('gas', - >>> 'temperature_gradient_z'), ('gas', 'temperature_gradient_magnitude')] + >>> grad_fields = ds.add_gradient_fields(("gas","density")) + >>> print(grad_fields) + ... [('gas', 'density_gradient_x'), ('gas', 'density_gradient_y'), + ... ('gas', 'density_gradient_z'), ('gas', 'density_gradient_magnitude')] Note that the above example assumes ds.geometry == 'cartesian'. In general, the function will create gradient components along the axes of the dataset coordinate system. 
- For instance, with cylindrical data, one gets 'temperature_gradient_' + For instance, with cylindrical data, one gets 'density_gradient_' """ if input_field is not None: From 400ecec3e8fcd5b5200a4e193cae419de307915c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 17:20:26 +0200 Subject: [PATCH 542/653] fix black --- yt/frontends/stream/data_structures.py | 1 + 1 file changed, 1 insertion(+) diff --git a/yt/frontends/stream/data_structures.py b/yt/frontends/stream/data_structures.py index 9e774e80e3b..8004a2870a3 100644 --- a/yt/frontends/stream/data_structures.py +++ b/yt/frontends/stream/data_structures.py @@ -363,6 +363,7 @@ def all_fields(self): fields = list(set(fields)) return fields + class StreamParticleIndex(SPHParticleIndex): def __init__(self, ds, dataset_type=None): self.stream_handler = ds.stream_handler From edaf13114524ecab097abd6b88ec4ea0a28f2d04 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 17:43:44 +0200 Subject: [PATCH 543/653] trigger style check workflow on PR --- .github/workflows/style-checks.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/style-checks.yaml b/.github/workflows/style-checks.yaml index 54cd19b5ad2..94d6d1d7020 100644 --- a/.github/workflows/style-checks.yaml +++ b/.github/workflows/style-checks.yaml @@ -1,5 +1,5 @@ name: Style Checks -on: [push] +on: [push, pull_request] jobs: flake8: From 04b7e7034dfa19f7160a849738391548f71f18ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 23:37:56 +0200 Subject: [PATCH 544/653] fix unbounded variable in shear calculation for 1D dataset (raise a meaningful error) --- yt/fields/fluid_vector_fields.py | 26 +++++++++++++++----------- 1 file changed, 15 insertions(+), 11 deletions(-) diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index 74b54c72284..57e90f46e97 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -3,6 +3,7 @@ from yt.fields.derived_field import ValidateParameter, ValidateSpatial from yt.funcs import just_one from yt.geometry.geometry_handler import is_curvilinear +from yt.utilities.exceptions import YTDimensionalityError from .field_plugin_registry import register_field_plugin from .vector_operations import create_magnitude_field, create_squared_field @@ -408,17 +409,20 @@ def _shear(field, data): (it's just like vorticity except add the derivative pairs instead of subtracting them) """ - if data.ds.dimensionality > 1: - vx = data[ftype, "relative_velocity_x"] - vy = data[ftype, "relative_velocity_y"] - dvydx = ( - vy[sl_right, sl_center, sl_center] - vy[sl_left, sl_center, sl_center] - ) / (div_fac * just_one(data["index", "dx"])) - dvxdy = ( - vx[sl_center, sl_right, sl_center] - vx[sl_center, sl_left, sl_center] - ) / (div_fac * just_one(data["index", "dy"])) - f = (dvydx + dvxdy) ** 2.0 - del dvydx, dvxdy + if data.ds.dimensionality == 1: + raise YTDimensionalityError("shear is meaningless in 1D") + + vx = data[ftype, "relative_velocity_x"] + vy = data[ftype, "relative_velocity_y"] + dvydx = ( + vy[sl_right, sl_center, sl_center] - vy[sl_left, sl_center, sl_center] + ) / (div_fac * just_one(data["index", "dx"])) + dvxdy = ( + vx[sl_center, sl_right, sl_center] - vx[sl_center, sl_left, sl_center] + ) / (div_fac * just_one(data["index", "dy"])) + f = (dvydx + dvxdy) ** 2.0 + del dvydx, dvxdy + if data.ds.dimensionality > 2: vz = data[ftype, "relative_velocity_z"] dvzdy = ( 
From 3ddb1894efa010a82039c82836ea41e3d9501ff7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 23:42:05 +0200 Subject: [PATCH 545/653] add an error for non-cartesian geometries --- yt/fields/fluid_vector_fields.py | 2 ++ 1 file changed, 2 insertions(+) diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index 57e90f46e97..1d330014e7f 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -411,6 +411,8 @@ def _shear(field, data): """ if data.ds.dimensionality == 1: raise YTDimensionalityError("shear is meaningless in 1D") + if data.ds.geometry != "cartesian": + raise NotImplementedError("shear is only supported in cartesian geometries") vx = data[ftype, "relative_velocity_x"] vy = data[ftype, "relative_velocity_y"] From 46a8936a88fb5c7ca9a19c29cc034908074b7a08 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 13 Aug 2020 08:17:47 +0200 Subject: [PATCH 546/653] try blocks instead of ifs; because dimensionality in the dataset doesn't necesseraly tell us how many velocity components there are (e.g. AMRVAC can run 2D simulations with 3 velocity components) --- yt/fields/fluid_vector_fields.py | 21 +++++++++++++++------ 1 file changed, 15 insertions(+), 6 deletions(-) diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index 1d330014e7f..c32a46d7dd0 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -3,7 +3,7 @@ from yt.fields.derived_field import ValidateParameter, ValidateSpatial from yt.funcs import just_one from yt.geometry.geometry_handler import is_curvilinear -from yt.utilities.exceptions import YTDimensionalityError +from yt.utilities.exceptions import YTDimensionalityError, YTFieldNotFound from .field_plugin_registry import register_field_plugin from .vector_operations import create_magnitude_field, create_squared_field @@ -409,13 +409,18 @@ def _shear(field, data): (it's just like vorticity except add the derivative pairs instead of subtracting them) """ - if data.ds.dimensionality == 1: - raise YTDimensionalityError("shear is meaningless in 1D") + if data.ds.geometry != "cartesian": raise NotImplementedError("shear is only supported in cartesian geometries") - vx = data[ftype, "relative_velocity_x"] - vy = data[ftype, "relative_velocity_y"] + try: + vx = data[ftype, "relative_velocity_x"] + vy = data[ftype, "relative_velocity_y"] + except YTFieldNotFound as e: + raise YTDimensionalityError( + "shear computation requires 2 velocity components" + ) from e + dvydx = ( vy[sl_right, sl_center, sl_center] - vy[sl_left, sl_center, sl_center] ) / (div_fac * just_one(data["index", "dx"])) @@ -425,7 +430,7 @@ def _shear(field, data): f = (dvydx + dvxdy) ** 2.0 del dvydx, dvxdy - if data.ds.dimensionality > 2: + try: vz = data[ftype, "relative_velocity_z"] dvzdy = ( vz[sl_center, sl_right, sl_center] - vz[sl_center, sl_left, sl_center] @@ -443,6 +448,10 @@ def _shear(field, data): ) / (div_fac * just_one(data["index", "dx"])) f += (dvxdz + dvzdx) ** 2.0 del dvxdz, dvzdx + except YTFieldNotFound: + # the absence of a z velocity component is not blocking + pass + np.sqrt(f, out=f) new_field = data.ds.arr(np.zeros_like(data[ftype, "velocity_x"]), f.units) new_field[sl_center, sl_center, sl_center] = f From 22812a70b462282f3316b333ddfe066500838fd6 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 08:00:51 +0200 Subject: [PATCH 547/653] cripple _shear_mach def to see what 
happens --- yt/fields/fluid_vector_fields.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index c32a46d7dd0..a4162862033 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -513,7 +513,10 @@ def _shear_mach(field, data): vx = data[ftype, "relative_velocity_x"] vy = data[ftype, "relative_velocity_y"] dvydx = ( - vy[sl_right, sl_center, sl_center] - vy[sl_left, sl_center, sl_center] + # tmp edit to see if tests catch it + vy[ + sl_right, sl_center, sl_center + ] # - vy[sl_left, sl_center, sl_center] ) / div_fac dvxdy = ( vx[sl_center, sl_right, sl_center] - vx[sl_center, sl_left, sl_center] From 048f0e9a220982c1fe99b97ac8fb472014274dab Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 09:39:22 +0200 Subject: [PATCH 548/653] apply same pattern in shear_mach --- yt/fields/fluid_vector_fields.py | 36 ++++++++++++++++++++------------ 1 file changed, 23 insertions(+), 13 deletions(-) diff --git a/yt/fields/fluid_vector_fields.py b/yt/fields/fluid_vector_fields.py index a4162862033..f5498a6427d 100644 --- a/yt/fields/fluid_vector_fields.py +++ b/yt/fields/fluid_vector_fields.py @@ -509,21 +509,28 @@ def _shear_mach(field, data): Shear (Mach) = [(dvx + dvy)^2 + (dvz + dvy)^2 + (dvx + dvz)^2 ]^(0.5) / c_sound """ - if data.ds.dimensionality > 1: + + if data.ds.geometry != "cartesian": + raise NotImplementedError( + "shear_mach is only supported in cartesian geometries" + ) + + try: vx = data[ftype, "relative_velocity_x"] vy = data[ftype, "relative_velocity_y"] - dvydx = ( - # tmp edit to see if tests catch it - vy[ - sl_right, sl_center, sl_center - ] # - vy[sl_left, sl_center, sl_center] - ) / div_fac - dvxdy = ( - vx[sl_center, sl_right, sl_center] - vx[sl_center, sl_left, sl_center] - ) / div_fac - f = (dvydx + dvxdy) ** 2.0 - del dvydx, dvxdy - if data.ds.dimensionality > 2: + except YTFieldNotFound as e: + raise YTDimensionalityError( + "shear_mach computation requires 2 velocity components" + ) from e + dvydx = ( + vy[sl_right, sl_center, sl_center] - vy[sl_left, sl_center, sl_center] + ) / div_fac + dvxdy = ( + vx[sl_center, sl_right, sl_center] - vx[sl_center, sl_left, sl_center] + ) / div_fac + f = (dvydx + dvxdy) ** 2.0 + del dvydx, dvxdy + try: vz = data[ftype, "relative_velocity_z"] dvzdy = ( vz[sl_center, sl_right, sl_center] - vz[sl_center, sl_left, sl_center] @@ -541,6 +548,9 @@ def _shear_mach(field, data): ) / div_fac f += (dvxdz + dvzdx) ** 2.0 del dvxdz, dvzdx + except YTFieldNotFound: + # the absence of a z velocity component is not blocking + pass f *= ( 2.0 ** data["index", "grid_level"][sl_center, sl_center, sl_center] / data[ftype, "sound_speed"][sl_center, sl_center, sl_center] From cbf8b76fee554899c5e3477e58f59712034f2f94 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 16 Aug 2020 19:20:50 +0200 Subject: [PATCH 549/653] add a filter to ytLogger to avoid dupplicated successive entries --- yt/utilities/logger.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/yt/utilities/logger.py b/yt/utilities/logger.py index b83b2eaaa24..66b8a42022d 100644 --- a/yt/utilities/logger.py +++ b/yt/utilities/logger.py @@ -42,6 +42,22 @@ def new(*args): ytLogger = logging.getLogger("yt") +class DuplicateFilter(logging.Filter): + """A filter that removes duplicated successive log entries.""" + + # source + # 
https://stackoverflow.com/questions/44691558/suppress-multiple-messages-with-same-content-in-python-logging-module-aka-log-co # noqa + def filter(self, record): + current_log = (record.module, record.levelno, record.msg) + if current_log != getattr(self, "last_log", None): + self.last_log = current_log + return True + return False + + +ytLogger.addFilter(DuplicateFilter()) + + def disable_stream_logging(): if len(ytLogger.handlers) > 0: ytLogger.removeHandler(ytLogger.handlers[0]) From 8feaf0ab31f8c27a685f43eef63f7dddb77b44bd Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 16 Aug 2020 23:10:18 +0200 Subject: [PATCH 550/653] remove dead code (deactivated tests and statements after raise or return) --- yt/data_objects/tests/test_streamlines.py | 29 -------------- yt/data_objects/unstructured_mesh.py | 14 ------- yt/frontends/art/data_structures.py | 4 -- yt/frontends/sdf/tests/test_outputs.py | 40 ------------------- yt/geometry/grid_geometry_handler.py | 14 ------- yt/loaders.py | 10 +---- .../parallel_analysis_interface.py | 1 - yt/visualization/eps_writer.py | 7 ---- .../volume_rendering/old_camera.py | 36 ----------------- 9 files changed, 1 insertion(+), 154 deletions(-) delete mode 100644 yt/data_objects/tests/test_streamlines.py delete mode 100644 yt/frontends/sdf/tests/test_outputs.py diff --git a/yt/data_objects/tests/test_streamlines.py b/yt/data_objects/tests/test_streamlines.py deleted file mode 100644 index d3cbb32a502..00000000000 --- a/yt/data_objects/tests/test_streamlines.py +++ /dev/null @@ -1,29 +0,0 @@ -import numpy as np - -from yt.testing import assert_equal, assert_rel_equal, fake_random_ds -from yt.visualization.streamlines import Streamlines - - -def setup(): - from yt.config import ytcfg - - ytcfg["yt", "__withintesting"] = "True" - - -_fields = ("density", "velocity_x", "velocity_y", "velocity_z") - - -def test_covering_grid(): - return - # We decompose in different ways - cs = np.mgrid[0.47:0.53:2j, 0.47:0.53:2j, 0.47:0.53:2j] - cs = np.array([a.ravel() for a in cs]).T - length = (1.0 / 128) * 16 # 16 half-widths of a cell - for nprocs in [1, 2, 4, 8]: - ds = fake_random_ds(64, nprocs=nprocs, fields=_fields) - streams = Streamlines(ds, cs, length=length) - streams.integrate_through_volume() - for path in (streams.path(i) for i in range(8)): - assert_rel_equal(path["dts"].sum(), 1.0, 14) - assert_equal(np.all(path["t"] <= (1.0 + 1e-10)), True) - path["density"] diff --git a/yt/data_objects/unstructured_mesh.py b/yt/data_objects/unstructured_mesh.py index d3a04a01d36..fa691ce879d 100644 --- a/yt/data_objects/unstructured_mesh.py +++ b/yt/data_objects/unstructured_mesh.py @@ -1,9 +1,7 @@ import numpy as np -import yt.geometry.particle_deposit as particle_deposit from yt.data_objects.data_containers import YTSelectionContainer from yt.funcs import mylog -from yt.utilities.exceptions import YTParticleDepositionNotImplemented from yt.utilities.lib.mesh_utilities import fill_fcoords, fill_fwidths @@ -97,18 +95,6 @@ def select_tcoords(self, dobj): def deposit(self, positions, fields=None, method=None, kernel_name="cubic"): raise NotImplementedError - # Here we perform our particle deposition. 
- cls = getattr(particle_deposit, f"deposit_{method}", None) - if cls is None: - raise YTParticleDepositionNotImplemented(method) - # We allocate number of zones, not number of octs - op = cls(self.ActiveDimensions.prod(), kernel_name) - op.initialize() - op.process_grid(self, positions, fields) - vals = op.finalize() - if vals is None: - return - return vals.reshape(self.ActiveDimensions, order="C") def select_blocks(self, selector): mask = self._get_selector_mask(selector) diff --git a/yt/frontends/art/data_structures.py b/yt/frontends/art/data_structures.py index 4c40f59335f..c28cfe4e2ac 100644 --- a/yt/frontends/art/data_structures.py +++ b/yt/frontends/art/data_structures.py @@ -934,7 +934,3 @@ def _read_amr_root(self, oct_handler): def included(self, selector): return True - if getattr(selector, "domain_id", None) is not None: - return selector.domain_id == self.domain_id - domain_ids = self.ds.index.oct_handler.domain_identify(selector) - return self.domain_id in domain_ids diff --git a/yt/frontends/sdf/tests/test_outputs.py b/yt/frontends/sdf/tests/test_outputs.py deleted file mode 100644 index 5624bbbb76e..00000000000 --- a/yt/frontends/sdf/tests/test_outputs.py +++ /dev/null @@ -1,40 +0,0 @@ -import socket -import urllib - -import numpy as np - -from yt.frontends.sdf.api import SDFDataset -from yt.testing import assert_equal, requires_module -from yt.visualization.api import ProjectionPlot - -_fields = ("deposit", "all_cic") -slac_scivis_data = "http://darksky.slac.stanford.edu/scivis2015/data/ds14_scivis_0128/ds14_scivis_0128_e4_dt04_1.0000" # NOQA E501 -ncsa_scivis_data = "http://use.yt/upload/744abba3" -scivis_data = ncsa_scivis_data - -# Answer on http://stackoverflow.com/questions/3764291/checking-network-connection -# Better answer on -# http://stackoverflow.com/questions/2712524/handling-urllib2s-timeout-python -def internet_on(): - try: - urllib.request.urlopen(scivis_data, timeout=1) - return True - except urllib.error.URLError: - return False - except socket.timeout: - return False - - -@requires_module("thingking") -def test_scivis(): - if not internet_on(): - return - return # HOTFIX: See discussion in 2334 - ds = SDFDataset(scivis_data) - if scivis_data == slac_scivis_data: - assert_equal(str(ds), "ds14_scivis_0128_e4_dt04_1.0000") - else: - assert_equal(str(ds), "744abba3") - ad = ds.all_data() - assert np.unique(ad["particle_position_x"]).size > 1 - ProjectionPlot(ds, "z", _fields) diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 696ca7109cb..1602ca40cb0 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -12,7 +12,6 @@ from yt.geometry.geometry_handler import ChunkDataCache, Index, YTDataChunk from yt.utilities.definitions import MAXLEVEL from yt.utilities.logger import ytLogger as mylog -from yt.utilities.on_demand_imports import _h5py as h5py from .grid_container import GridTree, MatchPointsToGrids @@ -73,19 +72,6 @@ def parameters(self): def _detect_output_fields_backup(self): # grab fields from backup file as well, if present return - try: - backup_filename = self.dataset.backup_filename - f = h5py.File(backup_filename, mode="r") - g = f["data"] - grid = self.grids[0] # simply check one of the grids - grid_group = g["grid_%010i" % (grid.id - grid._id_offset)] - for field_name in grid_group: - if field_name != "particles": - self.field_list.append(field_name) - except KeyError: - return - except IOError: - return def select_grids(self, level): """ diff --git 
a/yt/loaders.py b/yt/loaders.py index 7105911fbd7..4913d9135fb 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1211,15 +1211,7 @@ def load_unstructured_mesh( field_units.update(_f_unit) sfh[i] = _data particle_types.update(set_particle_types(d)) - # Simple check for axis length correctness - if 0 and len(data) > 0: - fn = list(sorted(data))[0] - array_values = data[fn] - if array_values.size != connectivity.shape[0]: - mylog.error( - "Dimensions of array must be one fewer than the coordinate set." - ) - raise RuntimeError + grid_left_edges = domain_left_edge grid_right_edges = domain_right_edge grid_dimensions = domain_dimensions.reshape(nprocs, 3).astype("int32") diff --git a/yt/utilities/parallel_tools/parallel_analysis_interface.py b/yt/utilities/parallel_tools/parallel_analysis_interface.py index a6a5ce04322..2f2b953505a 100644 --- a/yt/utilities/parallel_tools/parallel_analysis_interface.py +++ b/yt/utilities/parallel_tools/parallel_analysis_interface.py @@ -1270,7 +1270,6 @@ def partition_region_3d(self, left_edge, right_edge, padding=0.0, rank_ratio=1): LE, RE = left_edge[:], right_edge[:] if not self._distributed: raise NotImplementedError - return LE, RE # , re cc = MPI.Compute_dims(self.comm.size / rank_ratio, 3) mi = self.comm.rank % (self.comm.size // rank_ratio) diff --git a/yt/visualization/eps_writer.py b/yt/visualization/eps_writer.py index 0e77f9428ac..f9bd276d9be 100644 --- a/yt/visualization/eps_writer.py +++ b/yt/visualization/eps_writer.py @@ -725,7 +725,6 @@ def colorbar( imsize = (256, 1) else: raise RuntimeError(f"orientation {orientation} unknown") - return # If shrink is a scalar, then convert into tuple if not isinstance(shrink, (tuple, list)): @@ -1271,10 +1270,8 @@ def multiplot( "Number of images (%d) doesn't match nrow(%d)" " x ncol(%d)." % (len(images), nrow, ncol) ) - return if yt_plots is None and images is None: raise RuntimeError("Must supply either yt_plots or image filenames.") - return if yt_plots is not None and images is not None: mylog.warning("Given both images and yt plots. 
Ignoring images.") if yt_plots is not None: @@ -1427,7 +1424,6 @@ def multiplot( raise RuntimeError( f"{fields[index]} not found in cb_location dict" ) - return orientation = cb_location[fields[index]] elif isinstance(cb_location, list): orientation = cb_location[index] @@ -1528,7 +1524,6 @@ def multiplot_yt(ncol, nrow, plots, fields=None, **kwargs): "Number of plots ({0}) is less " "than nrow({1}) x ncol({2}).".format(len(fields), nrow, ncol) ) - return figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs) elif isinstance(plots, list) and isinstance(plots[0], (PlotWindow, PhasePlot)): if len(plots) < nrow * ncol: @@ -1536,11 +1531,9 @@ def multiplot_yt(ncol, nrow, plots, fields=None, **kwargs): "Number of plots ({0}) is less " "than nrow({1}) x ncol({2}).".format(len(fields), nrow, ncol) ) - return figure = multiplot(ncol, nrow, yt_plots=plots, fields=fields, **kwargs) else: raise RuntimeError("Unknown plot type in multiplot_yt") - return return figure diff --git a/yt/visualization/volume_rendering/old_camera.py b/yt/visualization/volume_rendering/old_camera.py index 3edce0dcb92..8eafbcc9c27 100644 --- a/yt/visualization/volume_rendering/old_camera.py +++ b/yt/visualization/volume_rendering/old_camera.py @@ -1499,41 +1499,6 @@ def __init__( ): mylog.error("I am sorry, HEALpix Camera does not work yet in 3.0") raise NotImplementedError - ParallelAnalysisInterface.__init__(self) - if ds is not None: - self.ds = ds - self.center = np.array(center, dtype="float64") - self.radius = radius - self.inner_radius = inner_radius - self.nside = nside - self.use_kd = use_kd - if transfer_function is None: - transfer_function = ProjectionTransferFunction() - self.transfer_function = transfer_function - - if isinstance(self.transfer_function, ProjectionTransferFunction): - self._sampler_object = InterpolatedProjectionSampler - self._needs_tf = 0 - else: - self._sampler_object = VolumeRenderSampler - self._needs_tf = 1 - - if fields is None: - fields = ["density"] - self.fields = fields - self.sub_samples = sub_samples - self.log_fields = log_fields - dd = ds.all_data() - efields = dd._determine_fields(self.fields) - if self.log_fields is None: - self.log_fields = [self.ds._get_field_info(*f).take_log for f in efields] - self.use_light = use_light - self.light_dir = None - self.light_rgba = None - if volume is None: - volume = AMRKDTree(self.ds, data_source=self.data_source) - self.use_kd = isinstance(volume, AMRKDTree) - self.volume = volume def new_image(self): image = np.zeros((12 * self.nside ** 2, 1, 4), dtype="float64", order="C") @@ -2099,7 +2064,6 @@ def _make_wf(f, w): def temp_weightfield(a, b): tr = b[f].astype("float64") * b[w] return b.apply_units(tr, a.units) - return tr return temp_weightfield From 283aebd5f70e608cac12f47d34de946deb3f5d7d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 17 Aug 2020 09:59:45 +0200 Subject: [PATCH 551/653] replace remaining occurences of deprecated h5py dataset.value attribute --- yt/frontends/gamer/data_structures.py | 8 ++++---- yt/frontends/gdf/data_structures.py | 2 +- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/yt/frontends/gamer/data_structures.py b/yt/frontends/gamer/data_structures.py index bc8a882264a..cac4e665080 100644 --- a/yt/frontends/gamer/data_structures.py +++ b/yt/frontends/gamer/data_structures.py @@ -60,7 +60,7 @@ def _count_grids(self): def _parse_index(self): parameters = self.dataset.parameters gid0 = 0 - grid_corner = self._handle["Tree/Corner"].value[:: self.pgroup] + 
grid_corner = self._handle["Tree/Corner"][()][:: self.pgroup] convert2physical = self._handle["Tree/Corner"].attrs["Cvt2Phy"] self.grid_dimensions[:] = parameters["PatchSize"] * self.refine_by @@ -99,7 +99,7 @@ def _parse_index(self): # number of particles in each grid try: self.grid_particle_count[:] = np.sum( - self._handle["Tree/NPar"].value.reshape(-1, self.pgroup), axis=1 + self._handle["Tree/NPar"][()].reshape(-1, self.pgroup), axis=1 )[:, None] except KeyError: self.grid_particle_count[:] = 0.0 @@ -113,7 +113,7 @@ def _parse_index(self): ) def _populate_grid_objects(self): - son_list = self._handle["Tree/Son"].value + son_list = self._handle["Tree/Son"][()] for gid in range(self.num_grids): grid = self.grids[gid] @@ -139,7 +139,7 @@ def _populate_grid_objects(self): def _validate_parent_children_relationship(self): mylog.info("Validating the parent-children relationship ...") - father_list = self._handle["Tree/Father"].value + father_list = self._handle["Tree/Father"][()] for grid in self.grids: # parent->children == itself diff --git a/yt/frontends/gdf/data_structures.py b/yt/frontends/gdf/data_structures.py index 33f7a7b8389..7d32a6130ec 100644 --- a/yt/frontends/gdf/data_structures.py +++ b/yt/frontends/gdf/data_structures.py @@ -202,7 +202,7 @@ def _set_code_unit_attributes(self): if "dataset_units" in h5f: for unit_name in h5f["/dataset_units"]: current_unit = h5f[f"/dataset_units/{unit_name}"] - value = current_unit.value + value = current_unit[()] unit = current_unit.attrs["unit"] # need to convert to a Unit object and check dimensions # because unit can be things like From 71ff716c171827e23333f6be17872f751b6016d9 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 17 Aug 2020 14:51:53 +0200 Subject: [PATCH 552/653] hotfix a refactoring mistake that broke load_sample --- yt/loaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/loaders.py b/yt/loaders.py index 7105911fbd7..160a8274027 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1332,7 +1332,7 @@ def load_sample(fn=None, specific_file=None, pbar=True): try: import tqdm # noqa: F401 - downloader = pooch.HTTPDownloader(progressbar=True) + downloader = pooch.pooch.HTTPDownloader(progressbar=True) except ImportError: mylog.warning("tqdm is not installed, progress bar can not be displayed.") From b4093b4713ba915c631d568190849e11c5994eaf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 17 Aug 2020 15:10:05 +0200 Subject: [PATCH 553/653] more explicit mode arguments --- yt/frontends/arepo/io.py | 2 +- yt/frontends/gadget/io.py | 4 ++-- yt/frontends/gadget_fof/io.py | 2 +- yt/frontends/halo_catalog/data_structures.py | 2 +- yt/frontends/halo_catalog/io.py | 6 +++--- yt/frontends/swift/data_structures.py | 4 ++-- yt/frontends/swift/io.py | 12 ++++++------ yt/frontends/swift/tests/test_outputs.py | 4 ++-- yt/frontends/ytdata/io.py | 2 +- 9 files changed, 19 insertions(+), 19 deletions(-) diff --git a/yt/frontends/arepo/io.py b/yt/frontends/arepo/io.py index 9147e74e2ca..0af4b993016 100644 --- a/yt/frontends/arepo/io.py +++ b/yt/frontends/arepo/io.py @@ -15,7 +15,7 @@ def _get_smoothing_length(self, data_file, position_dtype, position_shape): ptype = self.ds._sph_ptypes[0] ind = int(ptype[-1]) si, ei = data_file.start, data_file.end - with h5py.File(data_file.filename, "r") as f: + with h5py.File(data_file.filename, mode="r") as f: pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") pcount = np.clip(pcount - si, 0, ei - si) # Arepo cells do 
not have "smoothing lengths" by definition, so diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 500ed82f03a..f6a1d204dde 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -69,7 +69,7 @@ def _read_particle_coords(self, chunks, ptf): def _yield_coordinates(self, data_file, needed_ptype=None): si, ei = data_file.start, data_file.end - f = h5py.File(data_file.filename, "r") + f = h5py.File(data_file.filename, mode="r") pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") np.clip(pcount - si, 0, ei - si, out=pcount) pcount = pcount.sum() @@ -93,7 +93,7 @@ def _generate_smoothing_length(self, index): return hsml_fn = data_files[0].filename.replace(".hdf5", ".hsml.hdf5") if os.path.exists(hsml_fn): - with h5py.File(hsml_fn, "r") as f: + with h5py.File(hsml_fn, mode="r") as f: file_hash = f.attrs["q"] if file_hash != self.ds._file_hash: mylog.warning("Replacing hsml files.") diff --git a/yt/frontends/gadget_fof/io.py b/yt/frontends/gadget_fof/io.py index 290cbef449f..b44c0e4523b 100644 --- a/yt/frontends/gadget_fof/io.py +++ b/yt/frontends/gadget_fof/io.py @@ -39,7 +39,7 @@ def _read_particle_coords(self, chunks, ptf): def _yield_coordinates(self, data_file): ptypes = self.ds.particle_types_raw - with h5py.File(data_file.filename, "r") as f: + with h5py.File(data_file.filename, mode="r") as f: for ptype in sorted(ptypes): pcount = data_file.total_particles[ptype] if pcount == 0: diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 52fae36e5c7..05636dde14e 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -272,7 +272,7 @@ def _get_halo_values(self, ptype, identifiers, fields, f=None): my_f = ( f if self.data_files[i_scalar].filename == filename - else h5py.File(self.data_files[i_scalar].filename, "r") + else h5py.File(self.data_files[i_scalar].filename, mode="r") ) for field in fields: diff --git a/yt/frontends/halo_catalog/io.py b/yt/frontends/halo_catalog/io.py index d8365918a27..f8ec076ecb7 100644 --- a/yt/frontends/halo_catalog/io.py +++ b/yt/frontends/halo_catalog/io.py @@ -36,7 +36,7 @@ def _read_particle_coords(self, chunks, ptf): def _yield_coordinates(self, data_file): pn = "particle_position_%s" - with h5py.File(data_file.filename, "r") as f: + with h5py.File(data_file.filename, mode="r") as f: units = parse_h5_attr(f[pn % "x"], "units") x, y, z = ( self.ds.arr(f[pn % ax][()].astype("float64"), units) for ax in "xyz" @@ -79,7 +79,7 @@ def _count_particles(self, data_file): return {"halos": nhalos} def _identify_fields(self, data_file): - with h5py.File(data_file.filename, "r") as f: + with h5py.File(data_file.filename, mode="r") as f: fields = [ ("halos", field) for field in f if not isinstance(f[field], h5py.Group) ] @@ -122,7 +122,7 @@ class IOHandlerYTHalo(HaloDatasetIOHandler, IOHandlerYTHaloCatalog): _dataset_type = "ythalo" def _identify_fields(self, data_file): - with h5py.File(data_file.filename, "r") as f: + with h5py.File(data_file.filename, mode="r") as f: scalar_fields = [ ("halos", field) for field in f if not isinstance(f[field], h5py.Group) ] diff --git a/yt/frontends/swift/data_structures.py b/yt/frontends/swift/data_structures.py index f40a780e191..cddf5e83a3f 100644 --- a/yt/frontends/swift/data_structures.py +++ b/yt/frontends/swift/data_structures.py @@ -68,7 +68,7 @@ def _get_info_attributes(self, dataset): of the information in the Header.attrs. 
""" - with h5py.File(self.filename, "r") as handle: + with h5py.File(self.filename, mode="r") as handle: header = dict(handle[dataset].attrs) return header @@ -172,7 +172,7 @@ def _is_valid(self, *args, **kwargs): valid = True # Attempt to open the file, if it's not a hdf5 then this will fail: try: - handle = h5py.File(filename, "r") + handle = h5py.File(filename, mode="r") valid = handle["Header"].attrs["Code"].decode("utf-8") == "SWIFT" handle.close() except (IOError, KeyError, ImportError): diff --git a/yt/frontends/swift/io.py b/yt/frontends/swift/io.py index 03fea0116a5..72191e1370f 100644 --- a/yt/frontends/swift/io.py +++ b/yt/frontends/swift/io.py @@ -28,7 +28,7 @@ def _read_particle_coords(self, chunks, ptf): sub_files.update(obj.data_files) for sub_file in sorted(sub_files, key=lambda x: x.filename): si, ei = sub_file.start, sub_file.end - f = h5py.File(sub_file.filename, "r") + f = h5py.File(sub_file.filename, mode="r") # This double-reads for ptype in sorted(ptf): if sub_file.total_particles[ptype] == 0: @@ -44,7 +44,7 @@ def _read_particle_coords(self, chunks, ptf): def _yield_coordinates(self, sub_file, needed_ptype=None): si, ei = sub_file.start, sub_file.end - f = h5py.File(sub_file.filename, "r") + f = h5py.File(sub_file.filename, mode="r") pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") np.clip(pcount - si, 0, ei - si, out=pcount) pcount = pcount.sum() @@ -67,7 +67,7 @@ def _get_smoothing_length(self, sub_file, pdtype=None, pshape=None): ptype = self.ds._sph_ptypes[0] ind = int(ptype[-1]) si, ei = sub_file.start, sub_file.end - with h5py.File(sub_file.filename, "r") as f: + with h5py.File(sub_file.filename, mode="r") as f: pcount = f["/Header"].attrs["NumPart_ThisFile"][ind].astype("int") pcount = np.clip(pcount - si, 0, ei - si) # we upscale to float64 @@ -84,7 +84,7 @@ def _read_particle_fields(self, chunks, ptf, selector): for sub_file in sorted(sub_files, key=lambda x: x.filename): si, ei = sub_file.start, sub_file.end - f = h5py.File(sub_file.filename, "r") + f = h5py.File(sub_file.filename, mode="r") for ptype, field_list in sorted(ptf.items()): if sub_file.total_particles[ptype] == 0: continue @@ -113,7 +113,7 @@ def _read_particle_fields(self, chunks, ptf, selector): def _count_particles(self, data_file): si, ei = data_file.start, data_file.end - f = h5py.File(data_file.filename, "r") + f = h5py.File(data_file.filename, mode="r") pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") f.close() # if this data_file was a sub_file, then we just extract the region @@ -124,7 +124,7 @@ def _count_particles(self, data_file): return npart def _identify_fields(self, data_file): - f = h5py.File(data_file.filename, "r") + f = h5py.File(data_file.filename, mode="r") fields = [] cname = self.ds._particle_coordinates_name # Coordinates mname = self.ds._particle_mass_name # Coordinates diff --git a/yt/frontends/swift/tests/test_outputs.py b/yt/frontends/swift/tests/test_outputs.py index ad5236d5bc7..fecdafc71de 100644 --- a/yt/frontends/swift/tests/test_outputs.py +++ b/yt/frontends/swift/tests/test_outputs.py @@ -21,7 +21,7 @@ def test_non_cosmo_dataset(): yt_coords = ad[(field[0], "position")] # load some data the old fashioned way - fh = h5py.File(ds.parameter_filename, "r") + fh = h5py.File(ds.parameter_filename, mode="r") part_data = fh["PartType0"] # set up a conversion factor by loading the unit mas and unit length in cm, @@ -67,7 +67,7 @@ def test_cosmo_dataset(): yt_coords = ad[(field[0], "position")] # load some data the old fashioned way - 
fh = h5py.File(ds.parameter_filename, "r") + fh = h5py.File(ds.parameter_filename, mode="r") part_data = fh["PartType0"] # set up a conversion factor by loading the unit mas and unit length in cm, diff --git a/yt/frontends/ytdata/io.py b/yt/frontends/ytdata/io.py index 987fd9d8a22..b8820636144 100644 --- a/yt/frontends/ytdata/io.py +++ b/yt/frontends/ytdata/io.py @@ -178,7 +178,7 @@ def _read_fluid_selection(self, chunks, selector, fields, size): raise NotImplementedError def _yield_coordinates(self, data_file): - with h5py.File(data_file.filename, "r") as f: + with h5py.File(data_file.filename, mode="r") as f: for ptype in f.keys(): if "x" not in f[ptype].keys(): continue From aaf8c2b6c146d93f5f629f57cd3d840b4161708b Mon Sep 17 00:00:00 2001 From: Madicken Munk Date: Mon, 17 Aug 2020 10:33:49 -0500 Subject: [PATCH 554/653] change fn back to name in load_sample --- yt/loaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index 160a8274027..50f27f7e635 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1291,7 +1291,7 @@ def load_sample(fn=None, specific_file=None, pbar=True): Parameters ---------- - fn : str or None + name : str or None The name of the sample data to load. This is generally the name of the folder of the dataset. For IsolatedGalaxy, the name would be `IsolatedGalaxy`. If `None` is supplied, the return value @@ -1312,7 +1312,7 @@ def load_sample(fn=None, specific_file=None, pbar=True): fido = PoochHandle() - if fn is None: + if name is None: keys = [] for key in fido._registry: for ext in _extensions_to_strip: From 48be3c7e9dfa4c840f0c0b200bcbe26b96be2c30 Mon Sep 17 00:00:00 2001 From: Madicken Munk Date: Mon, 17 Aug 2020 10:35:25 -0500 Subject: [PATCH 555/653] fix Untar() import from pooch --- yt/loaders.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/loaders.py b/yt/loaders.py index 50f27f7e635..a9603bc13db 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1337,7 +1337,7 @@ def load_sample(fn=None, specific_file=None, pbar=True): mylog.warning("tqdm is not installed, progress bar can not be displayed.") if extension == "h5": - processor = pooch.Untar() + processor = pooch.pooch.Untar() else: # we are going to assume most files that exist on the hub are # compressed in .tar folders. Some may not. From d5c2ad4be05632d2be8cbfabf933dfc997a208cb Mon Sep 17 00:00:00 2001 From: Madicken Munk Date: Mon, 17 Aug 2020 10:41:54 -0500 Subject: [PATCH 556/653] use name as arg in load_sample --- yt/loaders.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index a9603bc13db..a17b6dfc0cc 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1284,7 +1284,7 @@ def flatten(l): # If not, it will download it. -def load_sample(fn=None, specific_file=None, pbar=True): +def load_sample(name=None, specific_file=None, pbar=True): """ Load sample data with yt. Simple wrapper around yt.load to include fetching data with pooch. 
@@ -1324,7 +1324,7 @@ def load_sample(fn=None, specific_file=None, pbar=True): base_path = fido.pooch_obj.path registered_fname, name, extension = fido._validate_sample_fname( - fn + name ) # todo: make this part of the class downloader = None From 85fb081e611e85c4741c2191750de16adfae161e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 17 Aug 2020 15:21:17 +0200 Subject: [PATCH 557/653] fix numpy deprecation warnings for non-tuple sequence in multi-dim indexing --- yt/data_objects/grid_patch.py | 2 +- yt/frontends/boxlib/io.py | 12 ++++++++---- 2 files changed, 9 insertions(+), 5 deletions(-) diff --git a/yt/data_objects/grid_patch.py b/yt/data_objects/grid_patch.py index de79f168c1c..c7e64fe0a57 100644 --- a/yt/data_objects/grid_patch.py +++ b/yt/data_objects/grid_patch.py @@ -414,7 +414,7 @@ def select(self, selector, source, dest, offset): else: slices = get_nodal_slices(source.shape, nodal_flag, dim) for i, sl in enumerate(slices): - dest[offset : offset + count, i] = source[sl][np.squeeze(mask)] + dest[offset : offset + count, i] = source[tuple(sl)][np.squeeze(mask)] return count def count(self, selector): diff --git a/yt/frontends/boxlib/io.py b/yt/frontends/boxlib/io.py index 379fa0f78f5..4c21ba72fae 100644 --- a/yt/frontends/boxlib/io.py +++ b/yt/frontends/boxlib/io.py @@ -82,10 +82,14 @@ def _read_raw_field(self, grid, field): arr = np.fromfile(f, "float64", np.product(shape)) arr = arr.reshape(shape, order="F") return arr[ - [ - slice(None) if (nghost[dim] == 0) else slice(nghost[dim], -nghost[dim]) - for dim in range(self.ds.dimensionality) - ] + tuple( + [ + slice(None) + if (nghost[dim] == 0) + else slice(nghost[dim], -nghost[dim]) + for dim in range(self.ds.dimensionality) + ] + ) ] def _read_chunk_data(self, chunk, fields): From 7c7b703b1c3460d425c5cfc970b17d700367a5e0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 17 Aug 2020 19:54:01 +0200 Subject: [PATCH 558/653] use explicit kwarg mode= in h5py.File calls --- yt/frontends/open_pmd/data_structures.py | 14 +++++++------- yt/geometry/geometry_handler.py | 4 ++-- 2 files changed, 9 insertions(+), 9 deletions(-) diff --git a/yt/frontends/open_pmd/data_structures.py b/yt/frontends/open_pmd/data_structures.py index 5e82529cf4b..878e5e2fc9e 100644 --- a/yt/frontends/open_pmd/data_structures.py +++ b/yt/frontends/open_pmd/data_structures.py @@ -15,7 +15,7 @@ from yt.geometry.grid_geometry_handler import GridIndex from yt.utilities.file_handler import HDF5FileHandler, warn_h5py from yt.utilities.logger import ytLogger as mylog -from yt.utilities.on_demand_imports import _h5py as h5 +from yt.utilities.on_demand_imports import _h5py as h5py ompd_known_versions = [ StrictVersion("1.0.0"), @@ -133,7 +133,7 @@ def _detect_output_fields(self): for axis in mesh.keys(): mesh_fields.append(mname.replace("_", "-") + "_" + axis) except AttributeError: - # This is a h5.Dataset (i.e. no axes) + # This is a h5py.Dataset (i.e. 
no axes) mesh_fields.append(mname.replace("_", "-")) except (KeyError, TypeError, AttributeError): pass @@ -218,7 +218,7 @@ def _count_grids(self): meshes = f[bp + mp] for mname in meshes.keys(): mesh = meshes[mname] - if isinstance(mesh, h5.Group): + if isinstance(mesh, h5py.Group): shape = mesh[list(mesh.keys())[0]].shape else: shape = mesh.shape @@ -568,7 +568,7 @@ def _parse_parameter_file(self): meshes = f[bp + mp] for mname in meshes.keys(): mesh = meshes[mname] - if isinstance(mesh, h5.Group): + if isinstance(mesh, h5py.Group): shape = np.asarray(mesh[list(mesh.keys())[0]].shape) else: shape = np.asarray(mesh.shape) @@ -612,7 +612,7 @@ def _is_valid(self, *args, **kwargs): """ warn_h5py(args[0]) try: - with h5.File(args[0], "r") as f: + with h5py.File(args[0], mode="r") as f: attrs = list(f["/"].attrs.keys()) for i in opmd_required_attributes: if i not in attrs: @@ -641,7 +641,7 @@ class OpenPMDDatasetSeries(DatasetSeries): def __init__(self, filename): super(OpenPMDDatasetSeries, self).__init__([]) - self.handle = h5.File(filename, "r") + self.handle = h5py.File(filename, mode="r") self.filename = filename self._pre_outputs = sorted( np.asarray(list(self.handle["/data"].keys()), dtype=np.int) @@ -678,7 +678,7 @@ def __new__(cls, *args, **kwargs): def _is_valid(self, *args, **kwargs): warn_h5py(args[0]) try: - with h5.File(args[0], "r") as f: + with h5py.File(args[0], mode="r") as f: attrs = list(f["/"].attrs.keys()) for i in opmd_required_attributes: if i not in attrs: diff --git a/yt/geometry/geometry_handler.py b/yt/geometry/geometry_handler.py index 7becc737652..cca6cbb27bd 100644 --- a/yt/geometry/geometry_handler.py +++ b/yt/geometry/geometry_handler.py @@ -94,7 +94,7 @@ def _initialize_data_storage(self): self._data_mode = "r" self.__data_filename = fn - self._data_file = h5py.File(fn, self._data_mode) + self._data_file = h5py.File(fn, mode=self._data_mode) def __create_data_file(self, fn): # Note that this used to be parallel_root_only; it no longer is, @@ -143,7 +143,7 @@ def _reload_data_file(self, *args, **kwargs): return self._data_file.close() del self._data_file - self._data_file = h5py.File(self.__data_filename, self._data_mode) + self._data_file = h5py.File(self.__data_filename, mode=self._data_mode) def get_data(self, node, name): """ From f04aff50a57116f9b513ab79b9efaa02a2bdfb11 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Mon, 17 Aug 2020 19:58:31 +0200 Subject: [PATCH 559/653] add equivalent code example as suggested --- doc/source/faq/index.rst | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/doc/source/faq/index.rst b/doc/source/faq/index.rst index d8af617d3ba..f6e1d4a25ce 100644 --- a/doc/source/faq/index.rst +++ b/doc/source/faq/index.rst @@ -440,6 +440,12 @@ which would produce debug (as well as info, warning, and error) messages, or at yt.set_log_level("error") +This is the same as doing: + +.. code-block:: python + + yt.set_log_level(40) + which in this case would suppress everything below error messages. 
For reference, the numerical values corresponding to different log levels are: From ba7be85cadd53b0b3ec239770013a3ff12c4e3ff Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 09:06:36 +0200 Subject: [PATCH 560/653] fix message format --- yt/utilities/exceptions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index d952aafa48c..0ebb6df609e 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -21,9 +21,9 @@ def __init__(self, filename, args=None, kwargs=None): def __str__(self): msg = f"Could not determine input format from {self.filename}" - if self.args is not None: - msg += ", {self.args}" - if self.kwargs is not None: + if self.args: + msg += f", {self.args}" + if self.kwargs: msg += f", {self.kwargs}" msg += "." return msg From aa9f85811ae5e998fe766e98b8619bba40880e29 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 11:31:34 +0200 Subject: [PATCH 561/653] only trigger style checks on PR (avoid duplicated runs) --- .github/workflows/style-checks.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/style-checks.yaml b/.github/workflows/style-checks.yaml index 94d6d1d7020..21b4b48b70d 100644 --- a/.github/workflows/style-checks.yaml +++ b/.github/workflows/style-checks.yaml @@ -1,5 +1,5 @@ name: Style Checks -on: [push, pull_request] +on: [pull_request] jobs: flake8: From d97d9c10fe897098e52a399e44330101708b782c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 12:03:11 +0200 Subject: [PATCH 562/653] more explicit mode arguments --- yt/frontends/gadget/io.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/gadget/io.py b/yt/frontends/gadget/io.py index 500ed82f03a..b1e167eb0fd 100644 --- a/yt/frontends/gadget/io.py +++ b/yt/frontends/gadget/io.py @@ -69,7 +69,7 @@ def _read_particle_coords(self, chunks, ptf): def _yield_coordinates(self, data_file, needed_ptype=None): si, ei = data_file.start, data_file.end - f = h5py.File(data_file.filename, "r") + f = h5py.File(data_file.filename, mode="r") pcount = f["/Header"].attrs["NumPart_ThisFile"][:].astype("int") np.clip(pcount - si, 0, ei - si, out=pcount) pcount = pcount.sum() From 7c474d6e0eefaeef07f4b7a725ef528ede6c026f Mon Sep 17 00:00:00 2001 From: Britton Smith Date: Tue, 18 Aug 2020 13:33:24 +0100 Subject: [PATCH 563/653] Set property attributes correctly. 
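The loop below is replaced because the public names are, by all appearances, read-only properties on the index (backed by underscored attributes), so assigning through `setattr(self, "data_files", ...)` cannot work; the fix writes the backing attributes directly. A small illustration of the pitfall, using a hypothetical class rather than the yt index:

class Index:
    @property
    def data_files(self):
        # public read-only view over the backing attribute
        return self._data_files

parent, child = Index(), Index()
parent._data_files = ["halos_0.h5"]
# setattr(child, "data_files", parent.data_files)  # AttributeError: can't set attribute
child._data_files = parent.data_files  # write the backing attribute instead
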
--- yt/frontends/halo_catalog/data_structures.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 05636dde14e..763684dd4dc 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -306,8 +306,8 @@ def _setup_data_io(self): self.real_ds.index # inherit some things from parent index - for attr in ["data_files", "total_particles"]: - setattr(self, attr, getattr(self.real_ds.index, attr)) + self._data_files = self.real_ds.index.data_files + self._total_particles = self.real_ds.index.total_particles self._calculate_particle_index_starts() From 30e0a081b88c64d88aa1ea464bf22950ab1b3e86 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 17:56:39 +0200 Subject: [PATCH 564/653] skip tests whose requirements are not met --- yt/frontends/enzo/answer_testing_support.py | 8 +++++- yt/testing.py | 7 +++-- yt/utilities/answer_testing/framework.py | 30 ++++++++++++++++++--- yt/utilities/answer_testing/utils.py | 4 +-- 4 files changed, 41 insertions(+), 8 deletions(-) diff --git a/yt/frontends/enzo/answer_testing_support.py b/yt/frontends/enzo/answer_testing_support.py index b83ae8b6f46..d97b3a75f82 100644 --- a/yt/frontends/enzo/answer_testing_support.py +++ b/yt/frontends/enzo/answer_testing_support.py @@ -33,8 +33,14 @@ def __call__(self): def requires_outputlog(path=".", prefix=""): + from nose import SkipTest + def ffalse(func): - return lambda: None + @wraps(func) + def fskip(*args, **kwargs): + raise SkipTest + + return fskip def ftrue(func): @wraps(func) diff --git a/yt/testing.py b/yt/testing.py index c341e1a0253..5f5ff22da1f 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -788,11 +788,12 @@ def requires_module(module): being imported will not fail if the module is not installed on the testing platform. 
""" + from nose import SkipTest def ffalse(func): @functools.wraps(func) def false_wrapper(*args, **kwargs): - return None + raise SkipTest return false_wrapper @@ -812,12 +813,14 @@ def true_wrapper(*args, **kwargs): def requires_file(req_file): + from nose import SkipTest + path = ytcfg.get("yt", "test_data_dir") def ffalse(func): @functools.wraps(func) def false_wrapper(*args, **kwargs): - return None + raise SkipTest return false_wrapper diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index fa9cc896e5d..2fc0feeb176 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -1082,8 +1082,16 @@ def compare(self, new_result, old_result): def requires_sim(sim_fn, sim_type, big_data=False, file_check=False): + from functools import wraps + + from nose import SkipTest + def ffalse(func): - return lambda: None + @wraps(func) + def fskip(*args, **kwargs): + raise SkipTest + + return fskip def ftrue(func): return func @@ -1097,8 +1105,16 @@ def ftrue(func): def requires_answer_testing(): + from functools import wraps + + from nose import SkipTest + def ffalse(func): - return lambda: None + @wraps(func) + def fskip(*args, **kwargs): + raise SkipTest + + return fskip def ftrue(func): return func @@ -1110,8 +1126,16 @@ def ftrue(func): def requires_ds(ds_fn, big_data=False, file_check=False): + from functools import wraps + + from nose import SkipTest + def ffalse(func): - return lambda: None + @wraps(func) + def fskip(*args, **kwargs): + raise SkipTest + + return fskip def ftrue(func): return func diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index 02ceaef19ed..6e48ab3eb8b 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -359,7 +359,7 @@ def ffalse(func): @functools.wraps(func) def skip(*args, **kwargs): msg = f"{ds_fn} not found, skipping {func.__name__}." - pytest.fail(msg) + pytest.skip(msg) return skip @@ -386,7 +386,7 @@ def ffalse(func): @functools.wraps(func) def skip(*args, **kwargs): msg = f"{sim_fn} not found, skipping {func.__name__}." - pytest.fail(msg) + pytest.skip(msg) return skip From a37a823e8046693747169db2a8f011d462830dc0 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 18:32:50 +0200 Subject: [PATCH 565/653] improve error message formatting for YTUnidentifiedDataType --- yt/utilities/exceptions.py | 13 ++++++------- 1 file changed, 6 insertions(+), 7 deletions(-) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 0ebb6df609e..4759644371b 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -20,13 +20,12 @@ def __init__(self, filename, args=None, kwargs=None): self.kwargs = kwargs def __str__(self): - msg = f"Could not determine input format from {self.filename}" - if self.args: - msg += f", {self.args}" - if self.kwargs: - msg += f", {self.kwargs}" - msg += "." - return msg + msg = [f"Could not determine input format from `'{self.filename}'"] + if self.args not in (None, ()): + msg.append(", ".join(str(a) for a in self.args)) + if self.kwargs not in (None, {}): + msg.append(", ".join(f"{k}={v}" for k, v in self.kwargs.items())) + return ", ".join(msg) + "`." 
class YTOutputNotIdentified(YTUnidentifiedDataType): From 1194cc625eb6cc8570a58fe32e5a0b168975662c Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Tue, 18 Aug 2020 09:31:55 -0500 Subject: [PATCH 566/653] Add an option to strictly enforce requires_file --- yt/config.py | 2 +- yt/testing.py | 2 ++ yt/utilities/answer_testing/framework.py | 4 ++-- 3 files changed, 5 insertions(+), 3 deletions(-) diff --git a/yt/config.py b/yt/config.py index 09967c7bd2c..a6f3f5ba6e7 100644 --- a/yt/config.py +++ b/yt/config.py @@ -16,6 +16,7 @@ __withintesting="False", __withinpytest="False", __parallel="False", + __strict_requires="False", __global_parallel_rank="0", __global_parallel_size="1", __topcomm_parallel_rank="0", @@ -32,7 +33,6 @@ reconstruct_index="True", test_storage_dir="/does/not/exist", test_data_dir="/does/not/exist", - requires_ds_strict="False", enzo_db="", hub_url="https://girder.hub.yt/api/v1", hub_api_key="", diff --git a/yt/testing.py b/yt/testing.py index 4091247bf8c..c9b05cdbe49 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -817,6 +817,8 @@ def requires_file(req_file): def ffalse(func): @functools.wraps(func) def false_wrapper(*args, **kwargs): + if ytcfg.getboolean("yt", "__strict_requires"): + raise FileNotFoundError(req_file) return None return false_wrapper diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index fa9cc896e5d..b8b6794ab7c 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -305,7 +305,7 @@ def can_run_ds(ds_fn, file_check=False): try: load(ds_fn) except FileNotFoundError: - if ytcfg.getboolean("yt", "requires_ds_strict"): + if ytcfg.getboolean("yt", "__strict_requires"): if result_storage is not None: result_storage["tainted"] = True raise @@ -325,7 +325,7 @@ def can_run_sim(sim_fn, sim_type, file_check=False): try: load_simulation(sim_fn, sim_type) except FileNotFoundError: - if ytcfg.getboolean("yt", "requires_ds_strict"): + if ytcfg.getboolean("yt", "__strict_requires"): if result_storage is not None: result_storage["tainted"] = True raise From 2a86eddf057bec8260389a64e20d113015b09095 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 21:37:41 +0200 Subject: [PATCH 567/653] more explicit mode arguments --- yt/frontends/open_pmd/fields.py | 8 +++++--- .../grid_data_format/conversion/conversion_athena.py | 6 +++--- yt/utilities/grid_data_format/tests/test_writer.py | 4 ++-- yt/utilities/minimal_representation.py | 8 ++++---- 4 files changed, 14 insertions(+), 12 deletions(-) diff --git a/yt/frontends/open_pmd/fields.py b/yt/frontends/open_pmd/fields.py index c82c6d4b4e8..7a42e294dae 100644 --- a/yt/frontends/open_pmd/fields.py +++ b/yt/frontends/open_pmd/fields.py @@ -5,7 +5,7 @@ from yt.frontends.open_pmd.misc import is_const_component, parse_unit_dimension from yt.units.yt_array import YTQuantity from yt.utilities.logger import ytLogger as mylog -from yt.utilities.on_demand_imports import _h5py as h5 +from yt.utilities.on_demand_imports import _h5py as h5py from yt.utilities.physical_constants import mu_0, speed_of_light @@ -151,7 +151,7 @@ def __init__(self, ds, field_list): fields = f[bp + mp] for fname in fields.keys(): field = fields[fname] - if isinstance(field, h5.Dataset) or is_const_component(field): + if isinstance(field, h5py.Dataset) or is_const_component(field): # Don't consider axes. 
# This appears to be a vector field of single dimensionality ytname = str("_".join([fname.replace("_", "-")])) @@ -198,7 +198,9 @@ def __init__(self, ds, field_list): # interpretation of the pfield particle_position is later # derived in setup_absolute_positions in the way yt expects ytattrib = "positionCoarse" - if isinstance(record, h5.Dataset) or is_const_component(record): + if isinstance(record, h5py.Dataset) or is_const_component( + record + ): name = ["particle", ytattrib] self.known_particle_fields += ( (str("_".join(name)), (unit, [], None)), diff --git a/yt/utilities/grid_data_format/conversion/conversion_athena.py b/yt/utilities/grid_data_format/conversion/conversion_athena.py index 7404b78dd51..f1300f19efc 100644 --- a/yt/utilities/grid_data_format/conversion/conversion_athena.py +++ b/yt/utilities/grid_data_format/conversion/conversion_athena.py @@ -3,7 +3,7 @@ import numpy as np from yt.utilities.grid_data_format.conversion.conversion_abc import Converter -from yt.utilities.on_demand_imports import _h5py as h5 +from yt.utilities.on_demand_imports import _h5py as h5py translation_dict = {} translation_dict["density"] = "density" @@ -304,7 +304,7 @@ def read_and_write_data(self, basename, ddn, gdf_name): this_field.attrs["field_to_cgs"] = np.float64("1.0") # For Now def convert(self, index=True, data=True): - self.handle = h5.File(self.outname, "a") + self.handle = h5py.File(self.outname, mode="a") if index: self.read_and_write_index(self.basename, self.ddn, self.outname) if data: @@ -413,7 +413,7 @@ def read_grid(self, filename): return grid def write_to_gdf(self, fn, grid): - f = h5.File(fn, "a") + f = h5py.File(fn, mode="a") ## --------- Begin level nodes --------- ## g = f.create_group("gridded_data_format") diff --git a/yt/utilities/grid_data_format/tests/test_writer.py b/yt/utilities/grid_data_format/tests/test_writer.py index 6f2eb9e9506..647a241f15c 100644 --- a/yt/utilities/grid_data_format/tests/test_writer.py +++ b/yt/utilities/grid_data_format/tests/test_writer.py @@ -6,7 +6,7 @@ from yt.loaders import load from yt.testing import assert_equal, fake_random_ds, requires_module from yt.utilities.grid_data_format.writer import write_to_gdf -from yt.utilities.on_demand_imports import _h5py as h5 +from yt.utilities.on_demand_imports import _h5py as h5py TEST_AUTHOR = "yt test runner" TEST_COMMENT = "Testing write_to_gdf" @@ -33,7 +33,7 @@ def test_write_gdf(): del test_ds assert isinstance(load(tmpfile), GDFDataset) - h5f = h5.File(tmpfile, "r") + h5f = h5py.File(tmpfile, mode="r") gdf = h5f["gridded_data_format"].attrs assert_equal(gdf["data_author"], TEST_AUTHOR) assert_equal(gdf["data_comment"], TEST_COMMENT) diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index cfb24a48458..54775553381 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -7,7 +7,7 @@ from yt.funcs import compare_dicts, iterable from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.on_demand_imports import _h5py as h5 +from yt.utilities.on_demand_imports import _h5py as h5py def _sanitize_list(flist): @@ -51,7 +51,7 @@ def _deserialize_from_h5(g, ds): result[item] = ds.arr(g[item][:], g[item].attrs["units"]) else: result[item] = ds.quan(g[item][()], g[item].attrs["units"]) - elif isinstance(g[item], h5.Group): + elif isinstance(g[item], h5py.Group): result[item] = _deserialize_from_h5(g[item], ds) elif g[item] == "None": result[item] = None @@ -113,7 +113,7 @@ def store(self, storage): 
self._ds_mrep.store(storage) metadata, (final_name, chunks) = self._generate_post() metadata["obj_type"] = self.type - with h5.File(storage) as h5f: + with h5py.File(storage, mode="w") as h5f: dset = str(uuid4())[:8] h5f.create_group(dset) _serialize_to_h5(h5f[dset], metadata) @@ -226,7 +226,7 @@ def restore(self, storage, ds): if hasattr(self, "_ds_mrep"): self._ds_mrep.restore(storage, ds) metadata, (final_name, chunks) = self._generate_post() - with h5.File(storage, "r") as h5f: + with h5py.File(storage, mode="r") as h5f: for dset in h5f: stored_metadata = _deserialize_from_h5(h5f[dset], ds) if compare_dicts(metadata, stored_metadata): From 80395d0b09c805ec48739f4000598c22c2b8d584 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 21:45:55 +0200 Subject: [PATCH 568/653] a more elegant refactor for YTUnidentifiedDataType error message --- yt/utilities/exceptions.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index 4759644371b..d101f30266e 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -14,16 +14,16 @@ def __init__(self, message=None, ds=None): class YTUnidentifiedDataType(YTException): - def __init__(self, filename, args=None, kwargs=None): + def __init__(self, filename, *args, **kwargs): self.filename = filename self.args = args self.kwargs = kwargs def __str__(self): msg = [f"Could not determine input format from `'{self.filename}'"] - if self.args not in (None, ()): + if self.args: msg.append(", ".join(str(a) for a in self.args)) - if self.kwargs not in (None, {}): + if self.kwargs: msg.append(", ".join(f"{k}={v}" for k, v in self.kwargs.items())) return ", ".join(msg) + "`." From f1d6269c4c87a24a217e4f9ada1cd1c4eed79b2c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 21:46:41 +0200 Subject: [PATCH 569/653] separate return from def --- yt/utilities/exceptions.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/utilities/exceptions.py b/yt/utilities/exceptions.py index d101f30266e..93c06fcdb45 100644 --- a/yt/utilities/exceptions.py +++ b/yt/utilities/exceptions.py @@ -25,7 +25,8 @@ def __str__(self): msg.append(", ".join(str(a) for a in self.args)) if self.kwargs: msg.append(", ".join(f"{k}={v}" for k, v in self.kwargs.items())) - return ", ".join(msg) + "`." + msg = ", ".join(msg) + "`." 
+ return msg class YTOutputNotIdentified(YTUnidentifiedDataType): From fc06b25d0204cd41a754eebd5ac0ce07eec1235a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 22:07:27 +0200 Subject: [PATCH 570/653] fix calls to YTUnidentifiedDataType --- yt/data_objects/time_series.py | 2 +- yt/loaders.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 3728ce5e7fb..60bcfb4d7b2 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -168,7 +168,7 @@ def __new__(cls, outputs, *args, **kwargs): try: ret._pre_outputs = outputs[:] except TypeError as e: - raise YTUnidentifiedDataType(outputs) from e + raise YTUnidentifiedDataType(outputs, *args, **kwargs) from e return ret def __init__( diff --git a/yt/loaders.py b/yt/loaders.py index 7105911fbd7..6509970d6fb 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -97,7 +97,7 @@ def load(fn, *args, **kwargs): if len(candidates) > 1: raise YTAmbiguousDataType(fn, candidates) - raise YTUnidentifiedDataType(fn, args, kwargs) + raise YTUnidentifiedDataType(fn, *args, **kwargs) def load_simulation(fn, simulation_type, find_outputs=False): From d4b7b35097804a5b6d87fea365d9150febfad631 Mon Sep 17 00:00:00 2001 From: Madicken Munk Date: Tue, 18 Aug 2020 15:23:45 -0500 Subject: [PATCH 571/653] change name back to fn --- yt/loaders.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index a17b6dfc0cc..2c2816f296e 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1284,14 +1284,14 @@ def flatten(l): # If not, it will download it. -def load_sample(name=None, specific_file=None, pbar=True): +def load_sample(fn=None, specific_file=None, pbar=True): """ Load sample data with yt. Simple wrapper around yt.load to include fetching data with pooch. Parameters ---------- - name : str or None + fn : str or None The name of the sample data to load. This is generally the name of the folder of the dataset. For IsolatedGalaxy, the name would be `IsolatedGalaxy`. If `None` is supplied, the return value @@ -1312,7 +1312,7 @@ def load_sample(name=None, specific_file=None, pbar=True): fido = PoochHandle() - if name is None: + if fn is None: keys = [] for key in fido._registry: for ext in _extensions_to_strip: @@ -1324,7 +1324,7 @@ def load_sample(name=None, specific_file=None, pbar=True): base_path = fido.pooch_obj.path registered_fname, name, extension = fido._validate_sample_fname( - name + fn ) # todo: make this part of the class downloader = None From a9d4d6e07dce70030fb1a7a09b36f2fbdeeab46f Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Wed, 19 Aug 2020 14:17:38 -0500 Subject: [PATCH 572/653] Fix uninitialized values in bbox selection This provided sane values for the reduced dimension while selecting region of a 2d mesh. Not a full and proper fix, but hopefully will make the annoying error on travis go away. 
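Concretely: the per-element bounding-box loop only filled the first ndim components of le/re, so for a 2D mesh the third pair kept its sentinel values (1e60 / -1e60) and select_bbox was handed a meaningless box. Caching the domain center at selector construction and substituting it for the missing dimension keeps the box degenerate but well defined. A rough pure-Python sketch of the repaired logic (the real code is the Cython in selection_routines.pyx below):

import numpy as np

def element_bbox(vertices, ndim, domain_center):
    # sentinel-initialised bounds, tightened over the element's vertices
    le = np.full(3, 1e60)
    re = np.full(3, -1e60)
    for k in range(ndim):
        le[k] = vertices[:, k].min()
        re[k] = vertices[:, k].max()
    # components beyond the mesh dimensionality previously kept the sentinels;
    # pin them to the domain center so they stay well defined
    for k in range(ndim, 3):
        le[k] = re[k] = domain_center[k]
    return le, re

# e.g. an edge of a 2D mesh: the z-bounds collapse onto the domain center
le, re = element_bbox(np.array([[0.0, 0.0], [1.0, 0.5]]), 2, [0.5, 0.5, 0.5])
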
--- yt/geometry/selection_routines.pxd | 1 + yt/geometry/selection_routines.pyx | 6 +++++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/yt/geometry/selection_routines.pxd b/yt/geometry/selection_routines.pxd index 8ad6c687d63..91a63f0b49a 100644 --- a/yt/geometry/selection_routines.pxd +++ b/yt/geometry/selection_routines.pxd @@ -20,6 +20,7 @@ cdef class SelectorObject: cdef public np.int32_t max_level cdef int overlap_cells cdef np.float64_t domain_width[3] + cdef np.float64_t domain_center[3] cdef bint periodicity[3] cdef bint _hash_initialized cdef np.int64_t _hash diff --git a/yt/geometry/selection_routines.pyx b/yt/geometry/selection_routines.pyx index 4c62fe3e3e6..b7c6a1450de 100644 --- a/yt/geometry/selection_routines.pyx +++ b/yt/geometry/selection_routines.pyx @@ -149,6 +149,7 @@ cdef class SelectorObject: DRE = _ensure_code(ds.domain_right_edge) for i in range(3): self.domain_width[i] = DRE[i] - DLE[i] + self.domain_center[i] = DLE[i] + 0.5 * self.domain_width[i] self.periodicity[i] = ds.periodicity[i] def get_periodicity(self): @@ -449,7 +450,7 @@ cdef class SelectorObject: mask = np.zeros(npoints, dtype='uint8') for i in range(npoints): selected = 0 - for k in range(ndim): + for k in range(3): le[k] = 1e60 re[k] = -1e60 for j in range(nv): @@ -457,6 +458,9 @@ cdef class SelectorObject: pos = coords[indices[i, j] - offset, k] le[k] = fmin(pos, le[k]) re[k] = fmax(pos, re[k]) + for k in range(2, ndim - 1, -1): + le[k] = self.domain_center[k] + re[k] = self.domain_center[k] selected = self.select_bbox(le, re) total += selected mask[i] = selected From c1b4ff197dc567754936b2946144fd1b602e172a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 20 Aug 2020 11:16:49 +0200 Subject: [PATCH 573/653] add a workflow to discover h5py bad practices --- .github/workflows/rules-checks.yaml | 33 +++++++++++++++++++++++++++++ 1 file changed, 33 insertions(+) create mode 100644 .github/workflows/rules-checks.yaml diff --git a/.github/workflows/rules-checks.yaml b/.github/workflows/rules-checks.yaml new file mode 100644 index 00000000000..4581309d2b6 --- /dev/null +++ b/.github/workflows/rules-checks.yaml @@ -0,0 +1,33 @@ +name: Auto review bad practice +on: [pull_request] + +jobs: + h5py-bad-practices: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@master + - name: check h5py import + # check that we don't alias h5py to h5 + # reason: discoverability is important since this module's api + # is unstable and we want to be able to check for potential future failures + id: h5-import + run: | + grep -r -n "import _h5py as h5" yt | grep -v "import _h5py as h5py" | cat > h5-imports.log + if [ -s h5-imports.log ] ; then + echo "Please do not import h5py as h5. Here are the faulty lines." + cat h5-imports.log + exit 1 + fi + + - name: check-h5py-filemode + id: h5-file-mode + # check that a mode argument is always present in calls to h5py.File() + # reason: the default value is different in older versions 'w' VS newer ones 'r' + run: | + egrep -r -n "h5py\.File\([^,]+\)" yt > h5-mode.log + if [ -s h5-mode.log ] ; then + echo "h5py.File() should never be called without an explicit mode argument." + echo "Here are the faulty lines." 
+ cat h5-mode.log + exit 1 + fi From cf217882b2b7d9263a0a2cf60c506bf5ae772f43 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 20 Aug 2020 11:33:49 +0200 Subject: [PATCH 574/653] fix workflow --- .github/workflows/rules-checks.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/rules-checks.yaml b/.github/workflows/rules-checks.yaml index 4581309d2b6..af9f1598279 100644 --- a/.github/workflows/rules-checks.yaml +++ b/.github/workflows/rules-checks.yaml @@ -6,7 +6,7 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@master - - name: check h5py import + - name: check-h5py-import # check that we don't alias h5py to h5 # reason: discoverability is important since this module's api # is unstable and we want to be able to check for potential future failures @@ -24,7 +24,7 @@ jobs: # check that a mode argument is always present in calls to h5py.File() # reason: the default value is different in older versions 'w' VS newer ones 'r' run: | - egrep -r -n "h5py\.File\([^,]+\)" yt > h5-mode.log + grep -E -r -n "h5py\.File\([^,]+\)" yt | cat > h5-mode.log if [ -s h5-mode.log ] ; then echo "h5py.File() should never be called without an explicit mode argument." echo "Here are the faulty lines." From 86566117def27318e4ff7faf131ea6bbef2277f2 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Fri, 31 Jul 2020 11:00:07 -0500 Subject: [PATCH 575/653] squeezes out singleton dimensions from center --- yt/data_objects/data_containers.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 8d32e3a5bf3..83ce5a211d5 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -207,6 +207,11 @@ def _set_center(self, center): self.center = self.ds.find_min(center[4:])[1] else: self.center = self.ds.arr(center, "code_length", dtype="float64") + + if self.center.ndim > 1: + mylog.warning("Removing singleton dimensions from 'center'.") + self.center = np.squeeze(self.center) + self.set_field_parameter("center", self.center) def get_field_parameter(self, name, default=None): From bd25454fae78e51dbfb91164c54f96a9744bbea7 Mon Sep 17 00:00:00 2001 From: Chris Havlin Date: Thu, 20 Aug 2020 13:58:27 -0700 Subject: [PATCH 576/653] adding unit test --- yt/data_objects/tests/test_center_squeeze.py | 57 ++++++++++++++++++++ 1 file changed, 57 insertions(+) create mode 100644 yt/data_objects/tests/test_center_squeeze.py diff --git a/yt/data_objects/tests/test_center_squeeze.py b/yt/data_objects/tests/test_center_squeeze.py new file mode 100644 index 00000000000..220b28eba4f --- /dev/null +++ b/yt/data_objects/tests/test_center_squeeze.py @@ -0,0 +1,57 @@ +from yt.testing import assert_array_equal, fake_amr_ds, fake_particle_ds, fake_random_ds + + +def test_center_squeeze(): + # tests that selected values match when supplying center arrays of different shapes + # to the data container. 
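+    # here the same center is passed with shapes (3,), (1, 3) and (1, 1, 3);
+    # the resulting selections (field values and morton indices) should all match.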
+ + # list of fields to populate fake datasets with + fldz = ("density", "velocity_x", "velocity_y", "velocity_z") + + # create and test amr, random and particle data + check_single_ds(fake_amr_ds(fields=fldz)) + check_single_ds(fake_random_ds(16, fields=fldz)) + check_single_ds(fake_particle_ds(npart=100), check_morton=False) + + +def check_single_ds(ds, check_morton=True): + # compares values for range of data containers using different center array shapes + + center = ds.domain_center # reference center array + + # build some data containers + sp0 = ds.sphere(center, 0.25) + sl0 = ds.slice(0, 0.25, center=center) + reg0 = ds.region(center, [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25]) + + # store morton indices of each + i_sp0 = None + i_sl0 = None + i_reg0 = None + if check_morton: + i_sp0 = sp0["index", "morton_index"] + i_sp0.sort() + i_sl0 = sl0["index", "morton_index"] + i_sl0.sort() + i_reg0 = reg0["index", "morton_index"] + i_reg0.sort() + + # create new containers for different shapes of the center array + for test_shape in [(1, 3), (1, 1, 3)]: + new_center = center.reshape(test_shape) + sp = ds.sphere(new_center, 0.25) + sl = ds.slice(0, 0.25, center=new_center) + reg = ds.region(new_center, [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25]) + + # compare each to the reference containers + for ob, ob0, i_ob0 in [(sp, sp0, i_sp0), (sl, sl0, i_sl0), (reg, reg0, i_reg0)]: + + # check that selection field values match the reference + for fld in ds.field_list: + assert_array_equal(ob[fld], ob0[fld]) + + if check_morton: + # check that morton indices match the reference + i_ob = ob["index", "morton_index"] + i_ob.sort() + assert_array_equal(i_ob, i_ob0) From d3e9d27c7a38ef0dd3830f1c21d35c41a193dac4 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Wed, 26 Aug 2020 10:38:28 -0700 Subject: [PATCH 577/653] Update yt/data_objects/data_containers.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/data_objects/data_containers.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 83ce5a211d5..65361b1bde3 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -209,7 +209,7 @@ def _set_center(self, center): self.center = self.ds.arr(center, "code_length", dtype="float64") if self.center.ndim > 1: - mylog.warning("Removing singleton dimensions from 'center'.") + mylog.debug("Removing singleton dimensions from 'center'.") self.center = np.squeeze(self.center) self.set_field_parameter("center", self.center) From 96d9b89eb5e901e02205afe059027b555fd84420 Mon Sep 17 00:00:00 2001 From: Chris Havlin Date: Wed, 26 Aug 2020 15:27:40 -0700 Subject: [PATCH 578/653] reduce test fields, add error check after squeeze --- yt/data_objects/data_containers.py | 6 ++++++ yt/data_objects/tests/test_center_squeeze.py | 10 +++++----- 2 files changed, 11 insertions(+), 5 deletions(-) diff --git a/yt/data_objects/data_containers.py b/yt/data_objects/data_containers.py index 65361b1bde3..c94318a40d4 100644 --- a/yt/data_objects/data_containers.py +++ b/yt/data_objects/data_containers.py @@ -211,6 +211,12 @@ def _set_center(self, center): if self.center.ndim > 1: mylog.debug("Removing singleton dimensions from 'center'.") self.center = np.squeeze(self.center) + if self.center.ndim > 1: + msg = ( + "center array must be 1 dimensional, supplied center has " + f"{self.center.ndim} dimensions with shape {self.center.shape}." 
+ ) + raise YTException(msg) self.set_field_parameter("center", self.center) diff --git a/yt/data_objects/tests/test_center_squeeze.py b/yt/data_objects/tests/test_center_squeeze.py index 220b28eba4f..b1c26d3aa21 100644 --- a/yt/data_objects/tests/test_center_squeeze.py +++ b/yt/data_objects/tests/test_center_squeeze.py @@ -6,15 +6,15 @@ def test_center_squeeze(): # to the data container. # list of fields to populate fake datasets with - fldz = ("density", "velocity_x", "velocity_y", "velocity_z") + fldz = ("Density",) # create and test amr, random and particle data - check_single_ds(fake_amr_ds(fields=fldz)) - check_single_ds(fake_random_ds(16, fields=fldz)) - check_single_ds(fake_particle_ds(npart=100), check_morton=False) + check_single_ds(fake_amr_ds(fields=fldz), True) + check_single_ds(fake_random_ds(16, fields=fldz), True) + check_single_ds(fake_particle_ds(npart=100), False) -def check_single_ds(ds, check_morton=True): +def check_single_ds(ds, check_morton): # compares values for range of data containers using different center array shapes center = ds.domain_center # reference center array From 843bbfd11f78068d7620341994af7a4b987f93e4 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 10:37:08 +0200 Subject: [PATCH 579/653] Fix isort --- yt/utilities/lib/image_samplers.pyx | 35 +++++++++++++++++------------ 1 file changed, 21 insertions(+), 14 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 2559e359f62..6ba9a74a01b 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -15,26 +15,33 @@ Image sampler definitions import numpy as np cimport cython -from libc.stdlib cimport malloc, free -from libc.math cimport sqrt -from yt.utilities.lib.fp_utils cimport imin, fclip, i64clip -from field_interpolation_tables cimport \ - FieldInterpolationTable, FIT_initialize_table, FIT_eval_transfer,\ - FIT_eval_transfer_with_light cimport lenses -from .grid_traversal cimport walk_volume, sampler_function -from .fixed_interpolator cimport \ - offset_interpolate, \ - fast_interpolate, \ - trilinear_interpolate, \ - eval_gradient, \ - offset_fill, \ - vertex_interp +from field_interpolation_tables cimport ( + FieldInterpolationTable, + FIT_eval_transfer, + FIT_eval_transfer_with_light, + FIT_initialize_table, +) +from libc.math cimport sqrt +from libc.stdlib cimport free, malloc + +from yt.utilities.lib.fp_utils cimport fclip, i64clip, imin + +from .fixed_interpolator cimport ( + eval_gradient, + fast_interpolate, + offset_fill, + offset_interpolate, + trilinear_interpolate, + vertex_interp, +) +from .grid_traversal cimport sampler_function, walk_volume from yt.funcs import mylog from .cyoctree_raytracing cimport CythonOctreeRayTracing, RayInfo + cdef extern from "platform_dep.h": long int lrint(double x) nogil From 55aa3ca81b25105c50fb94e5b47cee20795490c3 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 11:01:02 +0200 Subject: [PATCH 580/653] Revert "Start fixing what I broke ..." 
This reverts commit c768d9d268e8ab2ed21956c27d1efd5220e627cc for off_axis_projection.py --- .../volume_rendering/off_axis_projection.py | 29 ++++++++++++++++--- 1 file changed, 25 insertions(+), 4 deletions(-) diff --git a/yt/visualization/volume_rendering/off_axis_projection.py b/yt/visualization/volume_rendering/off_axis_projection.py index 5b1c54ca944..6376f1021ca 100644 --- a/yt/visualization/volume_rendering/off_axis_projection.py +++ b/yt/visualization/volume_rendering/off_axis_projection.py @@ -3,12 +3,13 @@ from yt.data_objects.api import ImageArray from yt.funcs import iterable, mylog from yt.units.unit_object import Unit +from yt.utilities.lib.partitioned_grid import PartitionedGrid from yt.utilities.lib.pixelization_routines import ( normalization_2d_utility, off_axis_projection_SPH, ) -from .render_source import create_volume_source +from .render_source import KDTreeVolumeSource from .scene import Scene from .transfer_functions import ProjectionTransferFunction from .utils import data_source_or_all @@ -303,7 +304,7 @@ def off_axis_projection( funits = data_source.ds._get_field_info(item).units - vol = create_volume_source(data_source, item) + vol = KDTreeVolumeSource(data_source, item) vol.num_threads = num_threads if weight is None: vol.set_field(item) @@ -366,11 +367,31 @@ def temp_weightfield(a, b): if vol.weight_field is not None: fields.append(vol.weight_field) - vol._log_field = False - image = vol.render(camera) + mylog.debug("Casting rays") + + for (grid, mask) in data_source.blocks: + data = [] + for f in fields: + # strip units before multiplying by mask for speed + grid_data = grid[f] + units = grid_data.units + data.append(data_source.ds.arr(grid_data.d * mask, units, dtype="float64")) + pg = PartitionedGrid( + grid.id, + data, + mask.astype("uint8"), + grid.LeftEdge, + grid.RightEdge, + grid.ActiveDimensions.astype("int64"), + ) + grid.clear_data() + vol.sampler(pg, num_threads=num_threads) + + image = vol.finalize_image(camera, vol.sampler.aimage) image = ImageArray( image, funits, registry=data_source.ds.unit_registry, info=image.info ) + if weight is not None: data_source.ds.field_info.pop(("index", "temp_weightfield")) From 70a597fab04ae7d880fbcd8f3650e64768626041 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 11:15:42 +0200 Subject: [PATCH 581/653] More sensible naming conventions --- .../lib/{octree_raytracing.cpp => _octree_raytracing.hpp} | 7 ++----- .../{cyoctree_raytracing.pxd => _octree_raytracing.pxd} | 4 ++-- .../{cyoctree_raytracing.pyx => _octree_raytracing.pyx} | 2 +- .../lib/{pyoctree_raytracing.py => octree_raytracing.py} | 4 ++-- yt/visualization/volume_rendering/render_source.py | 2 +- 5 files changed, 8 insertions(+), 11 deletions(-) rename yt/utilities/lib/{octree_raytracing.cpp => _octree_raytracing.hpp} (98%) rename yt/utilities/lib/{cyoctree_raytracing.pxd => _octree_raytracing.pxd} (92%) rename yt/utilities/lib/{cyoctree_raytracing.pyx => _octree_raytracing.pyx} (97%) rename yt/utilities/lib/{pyoctree_raytracing.py => octree_raytracing.py} (96%) diff --git a/yt/utilities/lib/octree_raytracing.cpp b/yt/utilities/lib/_octree_raytracing.hpp similarity index 98% rename from yt/utilities/lib/octree_raytracing.cpp rename to yt/utilities/lib/_octree_raytracing.hpp index 5d4cc928e0a..105b5a988a9 100644 --- a/yt/utilities/lib/octree_raytracing.cpp +++ b/yt/utilities/lib/_octree_raytracing.hpp @@ -383,9 +383,6 @@ class Octree { }; -// Define some instances for easy use in Python +// Instanciate 3D octree for easier wrapping in 
Python template -using Octree3D = Octree; - -// Instantiate stuff -template class Octree; \ No newline at end of file +using Octree3D = Octree; \ No newline at end of file diff --git a/yt/utilities/lib/cyoctree_raytracing.pxd b/yt/utilities/lib/_octree_raytracing.pxd similarity index 92% rename from yt/utilities/lib/cyoctree_raytracing.pxd rename to yt/utilities/lib/_octree_raytracing.pxd index 4bf2e41ff48..3aebb754d08 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pxd +++ b/yt/utilities/lib/_octree_raytracing.pxd @@ -15,7 +15,7 @@ from .grid_traversal cimport sampler_function from .volume_container cimport VolumeContainer from .partitioned_grid cimport PartitionedGrid -cdef extern from "octree_raytracing.cpp": +cdef extern from "_octree_raytracing.hpp": cdef cppclass RayInfo[T]: vector[T] keys vector[double] t @@ -25,6 +25,6 @@ cdef extern from "octree_raytracing.cpp": void insert_node_no_ret(const int* ipos, const int lvl, T key) void cast_ray(double* origins, double* directions, vector[T] keyList, vector[double] tList) -cdef class CythonOctreeRayTracing: +cdef class _OctreeRayTracing: cdef Octree3D[int]* oct cdef int depth diff --git a/yt/utilities/lib/cyoctree_raytracing.pyx b/yt/utilities/lib/_octree_raytracing.pyx similarity index 97% rename from yt/utilities/lib/cyoctree_raytracing.pyx rename to yt/utilities/lib/_octree_raytracing.pyx index 761be09322e..372b8967061 100644 --- a/yt/utilities/lib/cyoctree_raytracing.pyx +++ b/yt/utilities/lib/_octree_raytracing.pyx @@ -24,7 +24,7 @@ from .volume_container cimport VolumeContainer DEF Nch = 4 -cdef class CythonOctreeRayTracing: +cdef class _OctreeRayTracing: def __init__(self, np.ndarray LE, np.ndarray RE, int depth): cdef double* LE_ptr = LE.data cdef double* RE_ptr = RE.data diff --git a/yt/utilities/lib/pyoctree_raytracing.py b/yt/utilities/lib/octree_raytracing.py similarity index 96% rename from yt/utilities/lib/pyoctree_raytracing.py rename to yt/utilities/lib/octree_raytracing.py index 218c7e4ca6d..3880d0c3db1 100644 --- a/yt/utilities/lib/pyoctree_raytracing.py +++ b/yt/utilities/lib/octree_raytracing.py @@ -3,7 +3,7 @@ import numpy as np from yt.funcs import mylog -from yt.utilities.lib.cyoctree_raytracing import CythonOctreeRayTracing +from yt.utilities.lib._octree_raytracing import _OctreeRayTracing class OctreeRayTracing(object): @@ -26,7 +26,7 @@ def __init__(self, data_source): # 1/2**depth depth = lvl_min + ds.max_level + 1 - self.octree = CythonOctreeRayTracing(LE, RE, depth) + self.octree = _OctreeRayTracing(LE, RE, depth) ds = data_source.ds xyz = np.stack([data_source[key].to("unitary").value for key in "xyz"], axis=-1) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index c5b499056c0..ba23c4b9a98 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -11,8 +11,8 @@ from yt.utilities.amr_kdtree.api import AMRKDTree from yt.utilities.lib.bounding_volume_hierarchy import BVH from yt.utilities.lib.misc_utilities import zlines, zpoints +from yt.utilities.lib.octree_raytracing import OctreeRayTracing from yt.utilities.lib.partitioned_grid import PartitionedGrid -from yt.utilities.lib.pyoctree_raytracing import OctreeRayTracing from yt.utilities.on_demand_imports import NotAModule from yt.utilities.parallel_tools.parallel_analysis_interface import ( ParallelAnalysisInterface, From d19f84585be6de2b82b3b8f60e0724e7402467a9 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 
2020 11:21:05 +0200 Subject: [PATCH 582/653] Fix pxd imports --- yt/utilities/lib/image_samplers.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 6ba9a74a01b..3e22e251bde 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -39,7 +39,7 @@ from .grid_traversal cimport sampler_function, walk_volume from yt.funcs import mylog -from .cyoctree_raytracing cimport CythonOctreeRayTracing, RayInfo +from ._octree_raytracing cimport _OctreeRayTracing, RayInfo cdef extern from "platform_dep.h": @@ -210,7 +210,7 @@ cdef class ImageSampler: @cython.boundscheck(False) @cython.wraparound(False) @cython.cdivision(True) - def cast_through_octree(self, PartitionedGrid pg, CythonOctreeRayTracing oct, int num_threads = 0): + def cast_through_octree(self, PartitionedGrid pg, _OctreeRayTracing oct, int num_threads = 0): cdef RayInfo[int]* ri self.setup(pg) From acf10320016daddb93986d982c6e40f928d74a3b Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 11:23:55 +0200 Subject: [PATCH 583/653] Fixing isort --- yt/utilities/lib/image_samplers.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/lib/image_samplers.pyx b/yt/utilities/lib/image_samplers.pyx index 3e22e251bde..0565b119c5f 100644 --- a/yt/utilities/lib/image_samplers.pyx +++ b/yt/utilities/lib/image_samplers.pyx @@ -39,7 +39,7 @@ from .grid_traversal cimport sampler_function, walk_volume from yt.funcs import mylog -from ._octree_raytracing cimport _OctreeRayTracing, RayInfo +from ._octree_raytracing cimport RayInfo, _OctreeRayTracing cdef extern from "platform_dep.h": From aaf8d88799a264b30e3727af6a41b041d23863a8 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 11:25:50 +0200 Subject: [PATCH 584/653] Import from API --- doc/source/cookbook/various_lens.py | 2 +- yt/utilities/answer_testing/utils.py | 2 +- yt/visualization/volume_rendering/tests/test_varia.py | 3 +-- yt/visualization/volume_rendering/volume_rendering.py | 3 +-- 4 files changed, 4 insertions(+), 6 deletions(-) diff --git a/doc/source/cookbook/various_lens.py b/doc/source/cookbook/various_lens.py index 96e03428370..c29e6c9aa8b 100644 --- a/doc/source/cookbook/various_lens.py +++ b/doc/source/cookbook/various_lens.py @@ -1,5 +1,5 @@ import yt -from yt.visualization.volume_rendering.api import Scene, VolumeSource +from yt.visualization.volume_rendering.api import Scene, create_volume_source import numpy as np field = ("gas", "density") diff --git a/yt/utilities/answer_testing/utils.py b/yt/utilities/answer_testing/utils.py index 1f9d802c27e..4c951986f45 100644 --- a/yt/utilities/answer_testing/utils.py +++ b/yt/utilities/answer_testing/utils.py @@ -19,7 +19,7 @@ from yt.loaders import load, load_simulation from yt.units.yt_array import YTArray, YTQuantity from yt.visualization import particle_plots, plot_window as pw, profile_plotter -from yt.visualization.volume_rendering.scene import Scene +from yt.visualization.volume_rendering.api import Scene def _streamline_for_io(params): diff --git a/yt/visualization/volume_rendering/tests/test_varia.py b/yt/visualization/volume_rendering/tests/test_varia.py index 3d6ba9bd58f..4280ffeb439 100644 --- a/yt/visualization/volume_rendering/tests/test_varia.py +++ b/yt/visualization/volume_rendering/tests/test_varia.py @@ -7,8 +7,7 @@ import yt from yt.testing import fake_random_ds -from yt.visualization.volume_rendering.render_source import 
create_volume_source -from yt.visualization.volume_rendering.scene import Scene +from yt.visualization.volume_rendering.api import Scene, create_volume_source def setup(): diff --git a/yt/visualization/volume_rendering/volume_rendering.py b/yt/visualization/volume_rendering/volume_rendering.py index 10d4a611f38..d72ccb11c47 100644 --- a/yt/visualization/volume_rendering/volume_rendering.py +++ b/yt/visualization/volume_rendering/volume_rendering.py @@ -1,8 +1,7 @@ from yt.funcs import mylog from yt.utilities.exceptions import YTSceneFieldNotFound -from .render_source import MeshSource, create_volume_source -from .scene import Scene +from .api import MeshSource, Scene, create_volume_source from .utils import data_source_or_all From 9e114033fb99c7fae76454e0e4efc4ac7e63a426 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 11:38:10 +0200 Subject: [PATCH 585/653] Get rid of "VolumeSource" invocations --- yt/visualization/volume_rendering/render_source.py | 3 ++- yt/visualization/volume_rendering/scene.py | 3 ++- 2 files changed, 4 insertions(+), 2 deletions(-) diff --git a/yt/visualization/volume_rendering/render_source.py b/yt/visualization/volume_rendering/render_source.py index ba23c4b9a98..b1680c39dec 100644 --- a/yt/visualization/volume_rendering/render_source.py +++ b/yt/visualization/volume_rendering/render_source.py @@ -169,7 +169,8 @@ class VolumeSource(RenderSource, abc.ABC): camera, and renders an image. >>> import yt - >>> from yt.visualization.volume_rendering.api import Scene, VolumeSource, Camera + >>> from yt.visualization.volume_rendering.api import\ + ... Scene, create_volume_source, Camera >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> sc = Scene() >>> source = create_volume_source(ds.all_data(), 'density') diff --git a/yt/visualization/volume_rendering/scene.py b/yt/visualization/volume_rendering/scene.py index 98a13c29428..2928c12de39 100644 --- a/yt/visualization/volume_rendering/scene.py +++ b/yt/visualization/volume_rendering/scene.py @@ -50,7 +50,8 @@ class Scene: and a Camera. >>> import yt - >>> from yt.visualization.volume_rendering.api import Scene, VolumeSource, Camera + >>> from yt.visualization.volume_rendering.api import\ + ... 
Scene, create_volume_source, Camera >>> ds = yt.load('IsolatedGalaxy/galaxy0030/galaxy0030') >>> sc = Scene() >>> source = create_volume_source(ds.all_data(), 'density') From cd8311b9cad6d07590e6cf05371a9ee24f2bae0f Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 12:04:47 +0200 Subject: [PATCH 586/653] Bump image for appveyor --- appveyor.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/appveyor.yml b/appveyor.yml index b2aac8f156c..69312bf2247 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,6 +1,8 @@ # AppVeyor.com is a Continuous Integration service to build and run tests under # Windows +image: Visual Studio 2015 + environment: global: From 748e7ef68fcf240ca7b4e1bbadd52882cc7c8c15 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Sun, 30 Aug 2020 12:29:23 +0200 Subject: [PATCH 587/653] Fix windows build Call function Only support Ndim=3 Drop usage of "using" and rely on typedef instead Use uint8_t instead of u_char Fix platform in appveyor Using uint64_t instead of uint --- appveyor.yml | 2 +- setup.py | 7 +++++ yt/utilities/lib/_octree_raytracing.hpp | 39 ++++++++++--------------- yt/utilities/lib/_octree_raytracing.pxd | 6 ++-- yt/utilities/lib/_octree_raytracing.pyx | 4 +-- yt/utilities/lib/ewah_bool_wrap.pyx | 2 +- 6 files changed, 29 insertions(+), 31 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 69312bf2247..4d313de696c 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -13,7 +13,7 @@ environment: - PYTHON_VERSION: "3.8" platform: - -x64 + - x64 install: - "if not exist \"%userprofile%\\.config\\yt\" mkdir %userprofile%\\.config\\yt" diff --git a/setup.py b/setup.py index 3ebf13b17b4..70336c96e43 100644 --- a/setup.py +++ b/setup.py @@ -1,6 +1,7 @@ import glob import os import sys +from distutils.ccompiler import get_default_compiler from distutils.version import LooseVersion import pkg_resources @@ -45,6 +46,11 @@ else: std_libs = ["m"] +if get_default_compiler() == "msvc": + CPP14_FLAG = ["/std:c++14"] +else: + CPP14_FLAG = ["--std=c++14"] + cythonize_aliases = { "LIB_DIR": "yt/utilities/lib/", "LIB_DIR_EWAH": ["yt/utilities/lib/", "yt/utilities/lib/ewahboolarray/"], @@ -58,6 +64,7 @@ "OMP_ARGS": omp_args, "FIXED_INTERP": "yt/utilities/lib/fixed_interpolator.cpp", "ARTIO_SOURCE": glob.glob("yt/frontends/artio/artio_headers/*.c"), + "CPP14_FLAG": CPP14_FLAG, } lib_exts = [ diff --git a/yt/utilities/lib/_octree_raytracing.hpp b/yt/utilities/lib/_octree_raytracing.hpp index 105b5a988a9..13926907c87 100644 --- a/yt/utilities/lib/_octree_raytracing.hpp +++ b/yt/utilities/lib/_octree_raytracing.hpp @@ -11,6 +11,7 @@ typedef double F; +const int Ndim = 3; /* A simple node struct that contains a key and a fixed number of children, typically Nchildren = 2**Ndim @@ -18,11 +19,9 @@ typedef double F; template struct GenericNode { - using _Node = struct GenericNode; - // Tree data - _Node** children = nullptr; - _Node* parent = nullptr; + GenericNode** children = nullptr; + GenericNode* parent = nullptr; // Node data keyType key; @@ -45,7 +44,6 @@ struct RayInfo { } }; -template struct Ray { std::array o; // Origin std::array d; // Direction @@ -72,7 +70,6 @@ struct Ray { /* Converts an array of integer position into a flattened index. The fast varying index is the last one. */ -template inline unsigned char ijk2iflat(const std::array ijk) { unsigned char iflat = 0; for (auto i : ijk) { @@ -85,7 +82,6 @@ inline unsigned char ijk2iflat(const std::array ijk) { /* Converts a flattened index into an array of integer position. 
The fast varying index is the last one. */ -template inline std::array iflat2ijk(unsigned char iflat) { std::array ijk; for (auto idim = Ndim-1; idim >= 0; --idim) { @@ -96,13 +92,13 @@ inline std::array iflat2ijk(unsigned char iflat) { }; /* A class to build an octree and cast rays through it. */ -template +template class Octree { - using Node = struct GenericNode; - using keyVector = std::vector; - using Pos = std::array; - using iPos = std::array; - using ucPos = std::array; + typedef GenericNode Node; + typedef std::vector keyVector; + typedef std::array Pos; + typedef std::array iPos; + typedef std::array ucPos; private: const unsigned char twotondim; @@ -142,7 +138,7 @@ class Octree { // std::cerr << "Inserting at level: " << lvl << "/" << maxDepth << std::endl; // this is 0b100..., where the 1 is at position maxDepth - uint mask = 1<<(maxDepth - 1); + uint64_t mask = 1<<(maxDepth - 1); iPos ijk = ipos; std::array bitMask; @@ -157,7 +153,7 @@ class Octree { bitMask[idim] = ijk[idim] & mask; } mask >>= 1; - auto iflat = ijk2iflat(bitMask); + auto iflat = ijk2iflat(bitMask); // Create child if it does not exist yet child = create_get_node(node, iflat); @@ -190,7 +186,7 @@ class Octree { std::vector tList; ray_infos[i] = new RayInfo(Nfound); auto ri = ray_infos[i]; - Ray r(&origins[3*i], &directions[3*i], -1e99, 1e99); + Ray r(&origins[3*i], &directions[3*i], -1e99, 1e99); cast_ray(&r, ri->keys, ri->t); // Keep track of the number of cells hit to preallocate the next ray info container @@ -200,7 +196,7 @@ class Octree { } // Perform single ray tracing - void cast_ray(Ray *r, keyVector &keyList, std::vector &tList) { + void cast_ray(Ray *r, keyVector &keyList, std::vector &tList) { // Boolean mask for direction unsigned char a = 0; unsigned char bmask = twotondim >> 1; @@ -230,7 +226,7 @@ class Octree { } void cast_ray(double* o, double* d, keyVector &keyList, std::vector &tList) { - Ray r(o, d, -1e99, 1e99); + Ray r(o, d, -1e99, 1e99); cast_ray(&r, keyList, tList); } @@ -371,7 +367,7 @@ class Octree { } // From "An Efficient Parametric Algorithm for Octree Traversal" by Revelles, Urena, & Lastra inline unsigned char next_node(const F tx, const F ty, const F tz, - const u_char ix, const u_char iy, const u_char iz) { + const uint8_t ix, const uint8_t iy, const uint8_t iz) { if(tx < std::min(ty, tz)) { // YZ plane return ix; } else if (ty < std::min(tx, tz)) { // XZ plane @@ -381,8 +377,3 @@ class Octree { } } }; - - -// Instanciate 3D octree for easier wrapping in Python -template -using Octree3D = Octree; \ No newline at end of file diff --git a/yt/utilities/lib/_octree_raytracing.pxd b/yt/utilities/lib/_octree_raytracing.pxd index 3aebb754d08..1495febac88 100644 --- a/yt/utilities/lib/_octree_raytracing.pxd +++ b/yt/utilities/lib/_octree_raytracing.pxd @@ -20,11 +20,11 @@ cdef extern from "_octree_raytracing.hpp": vector[T] keys vector[double] t - cdef cppclass Octree3D[T] nogil: - Octree3D(int depth, double* LE, double* RE) + cdef cppclass Octree[T] nogil: + Octree(int depth, double* LE, double* RE) void insert_node_no_ret(const int* ipos, const int lvl, T key) void cast_ray(double* origins, double* directions, vector[T] keyList, vector[double] tList) cdef class _OctreeRayTracing: - cdef Octree3D[int]* oct + cdef Octree[int]* oct cdef int depth diff --git a/yt/utilities/lib/_octree_raytracing.pyx b/yt/utilities/lib/_octree_raytracing.pyx index 372b8967061..b13f56ee47a 100644 --- a/yt/utilities/lib/_octree_raytracing.pyx +++ b/yt/utilities/lib/_octree_raytracing.pyx @@ -1,5 +1,5 @@ # 
distutils: language = c++ -# distutils: extra_compile_args=["-std=c++11"] +# distutils: extra_compile_args = CPP14_FLAG """This is a wrapper around the C++ class to efficiently cast rays into an octree. It relies on the seminal paper by J. Revelles,, C.Ureña and M.Lastra. """ @@ -28,7 +28,7 @@ cdef class _OctreeRayTracing: def __init__(self, np.ndarray LE, np.ndarray RE, int depth): cdef double* LE_ptr = LE.data cdef double* RE_ptr = RE.data - self.oct = new Octree3D[int](depth, LE_ptr, RE_ptr) + self.oct = new Octree[int](depth, LE_ptr, RE_ptr) self.depth = depth @cython.boundscheck(False) diff --git a/yt/utilities/lib/ewah_bool_wrap.pyx b/yt/utilities/lib/ewah_bool_wrap.pyx index 13ae0d96a90..129363829db 100644 --- a/yt/utilities/lib/ewah_bool_wrap.pyx +++ b/yt/utilities/lib/ewah_bool_wrap.pyx @@ -1,6 +1,6 @@ # distutils: language = c++ # distutils: include_dirs = LIB_DIR_EWAH -# distutils: extra_compile_args=["-std=c++11"] +# distutils: extra_compile_args = CPP14_FLAG """ Wrapper for EWAH Bool Array: https://github.com/lemire/EWAHBoolArray From 96e7aeed593a635e544c61d3842e4fb6f93b4279 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 2 Sep 2020 22:40:56 +0200 Subject: [PATCH 588/653] fix broken file opening mode --- yt/utilities/minimal_representation.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/minimal_representation.py b/yt/utilities/minimal_representation.py index 54775553381..9a8e95906a7 100644 --- a/yt/utilities/minimal_representation.py +++ b/yt/utilities/minimal_representation.py @@ -113,7 +113,7 @@ def store(self, storage): self._ds_mrep.store(storage) metadata, (final_name, chunks) = self._generate_post() metadata["obj_type"] = self.type - with h5py.File(storage, mode="w") as h5f: + with h5py.File(storage, mode="r") as h5f: dset = str(uuid4())[:8] h5f.create_group(dset) _serialize_to_h5(h5f[dset], metadata) From 9e7f0ab9b3fa4b4d33d7b86181a7716e096eb5f2 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Thu, 3 Sep 2020 10:17:50 -0500 Subject: [PATCH 589/653] Explicitly enumerate all answer tests --- tests/tests.yaml | 18 ++++++++++++++---- 1 file changed, 14 insertions(+), 4 deletions(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index b96a170c014..e528197c192 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -100,8 +100,10 @@ answer_tests: - yt/frontends/tipsy/tests/test_outputs.py:test_tipsy_galaxy local_varia_015: - - yt/frontends/moab/tests/test_c5.py - - yt/fields/tests/test_xray_fields.py + - yt/frontends/moab/tests/test_c5.py:test_cantor_5 + - yt/fields/tests/test_xray_fields.py:test_sloshing_apec + - yt/fields/tests/test_xray_fields.py:test_d9p_cloudy + - yt/fields/tests/test_xray_fields.py:test_d9p_cloudy_local local_unstructured_011: - yt/visualization/volume_rendering/tests/test_mesh_render.py:test_composite_mesh_render @@ -146,8 +148,16 @@ answer_tests: - yt/frontends/ramses/tests/test_outputs.py:test_output_00080 local_ytdata_007: - - yt/frontends/ytdata/tests/test_outputs.py - - yt/frontends/ytdata/tests/test_old_outputs.py + - yt/frontends/ytdata/tests/test_outputs.py:test_datacontainer_data + - yt/frontends/ytdata/tests/test_outputs.py:test_grid_datacontainer_data + - yt/frontends/ytdata/tests/test_outputs.py:test_spatial_data + - yt/frontends/ytdata/tests/test_outputs.py:test_profile_data + - yt/frontends/ytdata/tests/test_outputs.py:test_nonspatial_data + - yt/frontends/ytdata/tests/test_old_outputs.py:test_old_datacontainer_data + - 
yt/frontends/ytdata/tests/test_old_outputs.py:test_old_grid_datacontainer_data + - yt/frontends/ytdata/tests/test_old_outputs.py:test_old_spatial_data + - yt/frontends/ytdata/tests/test_old_outputs.py:test_old_profile_data + - yt/frontends/ytdata/tests/test_old_outputs.py:test_old_nonspatial_data local_axialpix_006: - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization From 8ced4a6ffb9f47f5775045fdeb6a3040548294a3 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Thu, 3 Sep 2020 10:18:18 -0500 Subject: [PATCH 590/653] Exclude answer tests from unittests to avoid false-positive skips --- tests/nose_runner.py | 82 ++++++++++++++++++------------------- tests/test_requirements.txt | 1 + 2 files changed, 40 insertions(+), 43 deletions(-) diff --git a/tests/nose_runner.py b/tests/nose_runner.py index 5c69934d330..62d40f698f1 100644 --- a/tests/nose_runner.py +++ b/tests/nose_runner.py @@ -6,19 +6,10 @@ import nose import numpy import yaml -from coverage import Coverage from yt.config import ytcfg from yt.utilities.answer_testing.framework import AnswerTesting -cov = Coverage( - config_file=".coveragerc", - branch=True, - auto_data=True, - concurrency="multiprocessing", -) -cov.start() - numpy.set_printoptions(threshold=5, edgeitems=1, precision=4) @@ -54,8 +45,6 @@ def __init__(self, job): self.exclusive = exclusive def __call__(self): - old_stderr = sys.stderr - sys.stderr = mystderr = StringIO() test_dir = ytcfg.get("yt", "test_data_dir") answers_dir = os.path.join(test_dir, "answers") if "--with-answer-testing" in self.argv and not os.path.isdir( @@ -69,8 +58,7 @@ def __call__(self): if os.path.isfile("{}.xml".format(self.name)): os.remove("{}.xml".format(self.name)) nose.run(argv=self.argv, addplugins=[AnswerTesting()], exit=False) - sys.stderr = old_stderr - return mystderr.getvalue() + return "" def __str__(self): return "WILL DO self.name = %s" % self.name @@ -78,12 +66,16 @@ def __str__(self): def generate_tasks_input(): pyver = "py{}{}".format(sys.version_info.major, sys.version_info.minor) + if sys.version_info < (3, 0, 0): + DROP_TAG = "py3" + else: + DROP_TAG = "py2" test_dir = ytcfg.get("yt", "test_data_dir") answers_dir = os.path.join(test_dir, "answers") with open("tests/tests.yaml", "r") as obj: lines = obj.read() - data = "\n".join([line for line in lines.split("\n") if "py2" not in line]) + data = "\n".join([line for line in lines.split("\n") if DROP_TAG not in line]) tests = yaml.load(data, Loader=yaml.FullLoader) base_argv = ["-s", "--nologcapture", "--with-xunit"] @@ -108,40 +100,44 @@ def generate_tasks_input(): argv += tests["answer_tests"][answer] args.append((argv, False)) + exclude_answers = [] + answer_tests = tests["answer_tests"] + for key in answer_tests: + for t in answer_tests[key]: + exclude_answers.append(t.replace('.py:', '.').replace('/', '.')) + exclude_answers = ["--exclude-test={}".format(ex) for ex in exclude_answers] + args = [ (item + ["--xunit-file=%s.xml" % item[0]], exclusive) + if item[0] != "unittests" + else (item + ["--xunit-file=unittests.xml"] + exclude_answers, exclusive) for item, exclusive in args ] return args if __name__ == "__main__": - try: - # multiprocessing.log_to_stderr(logging.DEBUG) - tasks = multiprocessing.JoinableQueue() - results = multiprocessing.Queue() - - num_consumers = int(os.environ.get("NUM_WORKERS", 6)) - consumers = [NoseWorker(tasks, results) for i in range(num_consumers)] - for w in consumers: - w.start() - - num_jobs = 0 - for job in generate_tasks_input(): - if job[1]: 
- num_consumers -= 1 # take into account exclusive jobs - tasks.put(NoseTask(job)) - num_jobs += 1 - - for i in range(num_consumers): - tasks.put(None) - - tasks.join() - - while num_jobs: - result = results.get() - num_jobs -= 1 - finally: - cov.stop() - cov.combine() - cov.xml_report(outfile="coverage.xml", ignore_errors=True) + # multiprocessing.log_to_stderr(logging.DEBUG) + tasks = multiprocessing.JoinableQueue() + results = multiprocessing.Queue() + + num_consumers = int(os.environ.get("NUM_WORKERS", 6)) + consumers = [NoseWorker(tasks, results) for i in range(num_consumers)] + for w in consumers: + w.start() + + num_jobs = 0 + for job in generate_tasks_input(): + if job[1]: + num_consumers -= 1 # take into account exclusive jobs + tasks.put(NoseTask(job)) + num_jobs += 1 + + for i in range(num_consumers): + tasks.put(None) + + tasks.join() + + while num_jobs: + result = results.get() + num_jobs -= 1 diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index 1002c74d8e8..0512a8ec5ec 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -29,3 +29,4 @@ f90nml>=1.1.2 MiniballCpp>=0.2.1 pooch>=0.7.0 pykdtree==1.3.1 +nose-exclude From 77550b4729a55d94e5ac893ce78f22571340233a Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Fri, 4 Sep 2020 16:56:04 -0500 Subject: [PATCH 591/653] simplifying the unit test --- yt/data_objects/tests/test_center_squeeze.py | 62 +++++--------------- 1 file changed, 14 insertions(+), 48 deletions(-) diff --git a/yt/data_objects/tests/test_center_squeeze.py b/yt/data_objects/tests/test_center_squeeze.py index b1c26d3aa21..ea9abb0557a 100644 --- a/yt/data_objects/tests/test_center_squeeze.py +++ b/yt/data_objects/tests/test_center_squeeze.py @@ -1,57 +1,23 @@ -from yt.testing import assert_array_equal, fake_amr_ds, fake_particle_ds, fake_random_ds +from yt.testing import assert_equal, fake_amr_ds, fake_particle_ds, fake_random_ds def test_center_squeeze(): - # tests that selected values match when supplying center arrays of different shapes - # to the data container. 
- - # list of fields to populate fake datasets with - fldz = ("Density",) + # checks that the center is reshaped correctly # create and test amr, random and particle data - check_single_ds(fake_amr_ds(fields=fldz), True) - check_single_ds(fake_random_ds(16, fields=fldz), True) - check_single_ds(fake_particle_ds(npart=100), False) - - -def check_single_ds(ds, check_morton): - # compares values for range of data containers using different center array shapes - - center = ds.domain_center # reference center array + check_single_ds(fake_amr_ds(fields=("Density",))) + check_single_ds(fake_random_ds(16, fields=("Density",))) + check_single_ds(fake_particle_ds(npart=100, fields=("Density",))) - # build some data containers - sp0 = ds.sphere(center, 0.25) - sl0 = ds.slice(0, 0.25, center=center) - reg0 = ds.region(center, [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25]) - # store morton indices of each - i_sp0 = None - i_sl0 = None - i_reg0 = None - if check_morton: - i_sp0 = sp0["index", "morton_index"] - i_sp0.sort() - i_sl0 = sl0["index", "morton_index"] - i_sl0.sort() - i_reg0 = reg0["index", "morton_index"] - i_reg0.sort() - - # create new containers for different shapes of the center array +def check_single_ds(ds): + # checks that the center + center = ds.domain_center # reference center value for test_shape in [(1, 3), (1, 1, 3)]: new_center = center.reshape(test_shape) - sp = ds.sphere(new_center, 0.25) - sl = ds.slice(0, 0.25, center=new_center) - reg = ds.region(new_center, [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25]) - - # compare each to the reference containers - for ob, ob0, i_ob0 in [(sp, sp0, i_sp0), (sl, sl0, i_sl0), (reg, reg0, i_reg0)]: - - # check that selection field values match the reference - for fld in ds.field_list: - assert_array_equal(ob[fld], ob0[fld]) - - if check_morton: - # check that morton indices match the reference - i_ob = ob["index", "morton_index"] - i_ob.sort() - assert_array_equal(i_ob, i_ob0) + assert_equal(ds.sphere(new_center, 0.25).center, center) + assert_equal(ds.slice(0, 0.25, center=new_center).center, center) + assert_equal( + ds.region(new_center, [-0.25, -0.25, -0.25], [0.25, 0.25, 0.25]).center, + center, + ) From 4a099bee70603380eea3b24738b2780e99cca044 Mon Sep 17 00:00:00 2001 From: chrishavlin Date: Fri, 4 Sep 2020 20:23:02 -0500 Subject: [PATCH 592/653] unit test fix --- yt/data_objects/tests/test_center_squeeze.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_center_squeeze.py b/yt/data_objects/tests/test_center_squeeze.py index ea9abb0557a..b7f80bd0005 100644 --- a/yt/data_objects/tests/test_center_squeeze.py +++ b/yt/data_objects/tests/test_center_squeeze.py @@ -7,7 +7,7 @@ def test_center_squeeze(): # create and test amr, random and particle data check_single_ds(fake_amr_ds(fields=("Density",))) check_single_ds(fake_random_ds(16, fields=("Density",))) - check_single_ds(fake_particle_ds(npart=100, fields=("Density",))) + check_single_ds(fake_particle_ds(npart=100)) def check_single_ds(ds): From c4c8141f24cc6bcefa8b307a804b56267e05d3f4 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Tue, 8 Sep 2020 12:12:29 -0500 Subject: [PATCH 593/653] Drop py2 support from nose_runner --- tests/nose_runner.py | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/tests/nose_runner.py b/tests/nose_runner.py index 62d40f698f1..b91adde912f 100644 --- a/tests/nose_runner.py +++ b/tests/nose_runner.py @@ -1,7 +1,6 @@ import multiprocessing import os import sys -from io import 
StringIO import nose import numpy @@ -66,17 +65,9 @@ def __str__(self): def generate_tasks_input(): pyver = "py{}{}".format(sys.version_info.major, sys.version_info.minor) - if sys.version_info < (3, 0, 0): - DROP_TAG = "py3" - else: - DROP_TAG = "py2" - test_dir = ytcfg.get("yt", "test_data_dir") answers_dir = os.path.join(test_dir, "answers") - with open("tests/tests.yaml", "r") as obj: - lines = obj.read() - data = "\n".join([line for line in lines.split("\n") if DROP_TAG not in line]) - tests = yaml.load(data, Loader=yaml.FullLoader) + tests = yaml.load(open("tests/tests.yaml", "r"), Loader=yaml.FullLoader) base_argv = ["-s", "--nologcapture", "--with-xunit"] @@ -104,7 +95,7 @@ def generate_tasks_input(): answer_tests = tests["answer_tests"] for key in answer_tests: for t in answer_tests[key]: - exclude_answers.append(t.replace('.py:', '.').replace('/', '.')) + exclude_answers.append(t.replace(".py:", ".").replace("/", ".")) exclude_answers = ["--exclude-test={}".format(ex) for ex in exclude_answers] args = [ From a6cf130a93308df410c8324fafd6640209530676 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 9 Sep 2020 16:07:44 +0200 Subject: [PATCH 594/653] Revert appveyor image? --- appveyor.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/appveyor.yml b/appveyor.yml index 4d313de696c..db16560bcbf 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -1,8 +1,6 @@ # AppVeyor.com is a Continuous Integration service to build and run tests under # Windows -image: Visual Studio 2015 - environment: global: From ae0d35bae024c52682d444d0fa865b25ce91ccbc Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Wed, 9 Sep 2020 15:31:43 -0500 Subject: [PATCH 595/653] [load_sample] Don't try to untar hdf5 files --- yt/loaders.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index 9507fe70301..70e14929f8a 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -1336,11 +1336,11 @@ def load_sample(fn=None, specific_file=None, pbar=True): except ImportError: mylog.warning("tqdm is not installed, progress bar can not be displayed.") - if extension == "h5": - processor = pooch.pooch.Untar() - else: + if extension != "h5": # we are going to assume most files that exist on the hub are # compressed in .tar folders. Some may not. + processor = pooch.pooch.Untar() + else: processor = None storage_fname = fido.pooch_obj.fetch( From 84e0d886583a21393cd7618e994edcec9141c545 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 14 Aug 2020 11:10:37 +0200 Subject: [PATCH 596/653] add a NotImplementedError and catch _that_ instead of waiting for unyt to raise a UnitConversionError. 
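In sketch form (not the literal Cython, which goes through a local dim variable), the guard amounts to:

    if data[field_names[0]].ndim not in (1, 3):
        raise NotImplementedError(...)

so the field-detection machinery (check_derived_fields) can catch NotImplementedError explicitly rather than a UnitConversionError surfacing from deep inside unyt.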
This fixes the following test: yt/data_objects/tests/test_covering_grid.py::test_smoothed_covering_grid_2d_dataset --- yt/utilities/lib/misc_utilities.pyx | 9 ++++++--- 1 file changed, 6 insertions(+), 3 deletions(-) diff --git a/yt/utilities/lib/misc_utilities.pyx b/yt/utilities/lib/misc_utilities.pyx index 510b2a01cec..13b79aef00e 100644 --- a/yt/utilities/lib/misc_utilities.pyx +++ b/yt/utilities/lib/misc_utilities.pyx @@ -628,11 +628,14 @@ def obtain_relative_velocity_vector( cdef np.ndarray[np.float64_t, ndim=3] vzg cdef np.ndarray[np.float64_t, ndim=4] rvg cdef np.float64_t bv[3] - cdef int i, j, k + cdef int i, j, k, dim units = data[field_names[0]].units bulk_vector = data.get_field_parameter(bulk_vector).to(units) - if len(data[field_names[0]].shape) == 1: + dim = len(data[field_names[0]].shape) == 1 + if dim == 2: + raise NotImplementedError + if dim == 1: # One dimensional data vxf = data[field_names[0]].astype("float64") vyf = data[field_names[1]].astype("float64") @@ -652,7 +655,7 @@ def obtain_relative_velocity_vector( rvf[1, i] = vyf[i] - bv[1] rvf[2, i] = vzf[i] - bv[2] return rvf - else: + elif dim == 3: # Three dimensional data vxg = data[field_names[0]].astype("float64") vyg = data[field_names[1]].astype("float64") From 6282a653ed3fb2b716443b5401f42c6528f1fb4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 10 Sep 2020 09:28:36 +0200 Subject: [PATCH 597/653] explicitly catch NotImplementedError where due --- yt/fields/field_info_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index d7e0a25b5f9..af9da3a800f 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -470,7 +470,7 @@ def check_derived_fields(self, fields_to_check=None): fi = self[field] try: fd = fi.get_dependencies(ds=self.ds) - except Exception as e: + except (NotImplementedError, Exception) as e: if field in self._show_field_errors: raise if not isinstance(e, YTFieldNotFound): From c434ec82b52d30124c944d3b1bced84ef028494b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 10 Sep 2020 10:38:50 +0200 Subject: [PATCH 598/653] fix var definition --- yt/utilities/lib/misc_utilities.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/lib/misc_utilities.pyx b/yt/utilities/lib/misc_utilities.pyx index 13b79aef00e..98b2639898d 100644 --- a/yt/utilities/lib/misc_utilities.pyx +++ b/yt/utilities/lib/misc_utilities.pyx @@ -632,7 +632,7 @@ def obtain_relative_velocity_vector( units = data[field_names[0]].units bulk_vector = data.get_field_parameter(bulk_vector).to(units) - dim = len(data[field_names[0]].shape) == 1 + dim = data[field_names[0]].ndim if dim == 2: raise NotImplementedError if dim == 1: From 8abd4bfdd6eb80b1aae2ed681aa79427d830461c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 10 Sep 2020 10:41:47 +0200 Subject: [PATCH 599/653] a more general error message --- yt/utilities/lib/misc_utilities.pyx | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/utilities/lib/misc_utilities.pyx b/yt/utilities/lib/misc_utilities.pyx index 98b2639898d..4b467930dad 100644 --- a/yt/utilities/lib/misc_utilities.pyx +++ b/yt/utilities/lib/misc_utilities.pyx @@ -633,8 +633,6 @@ def obtain_relative_velocity_vector( units = data[field_names[0]].units bulk_vector = data.get_field_parameter(bulk_vector).to(units) dim = data[field_names[0]].ndim - if dim == 2: - raise 
NotImplementedError if dim == 1: # One dimensional data vxf = data[field_names[0]].astype("float64") @@ -678,6 +676,8 @@ def obtain_relative_velocity_vector( rvg[1,i,j,k] = vyg[i,j,k] - bv[1] rvg[2,i,j,k] = vzg[i,j,k] - bv[2] return rvg + else: + raise NotImplementedError("Unsupported dim {}".format(dim)) def grow_flagging_field(oofield): cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8") From 4f6ebbd789c4d8d17ec46263d4fee05c2e6c1c4e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 10 Sep 2020 10:43:30 +0200 Subject: [PATCH 600/653] add noqa for intended redundant exception catching --- yt/fields/field_info_container.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/fields/field_info_container.py b/yt/fields/field_info_container.py index af9da3a800f..88a15390560 100644 --- a/yt/fields/field_info_container.py +++ b/yt/fields/field_info_container.py @@ -470,7 +470,7 @@ def check_derived_fields(self, fields_to_check=None): fi = self[field] try: fd = fi.get_dependencies(ds=self.ds) - except (NotImplementedError, Exception) as e: + except (NotImplementedError, Exception) as e: # noqa: B014 if field in self._show_field_errors: raise if not isinstance(e, YTFieldNotFound): From 3837967c37a13e1609ce2755d0f476e722931c3b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 10 Sep 2020 11:52:43 +0200 Subject: [PATCH 601/653] fstrings foreva --- yt/utilities/lib/misc_utilities.pyx | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/utilities/lib/misc_utilities.pyx b/yt/utilities/lib/misc_utilities.pyx index 4b467930dad..8900a6a7ee0 100644 --- a/yt/utilities/lib/misc_utilities.pyx +++ b/yt/utilities/lib/misc_utilities.pyx @@ -677,7 +677,7 @@ def obtain_relative_velocity_vector( rvg[2,i,j,k] = vzg[i,j,k] - bv[2] return rvg else: - raise NotImplementedError("Unsupported dim {}".format(dim)) + raise NotImplementedError(f"Unsupported dimensionality `{dim}`.") def grow_flagging_field(oofield): cdef np.ndarray[np.uint8_t, ndim=3] ofield = oofield.astype("uint8") From 3751ac4b59040b931f907c464e083bb6e0afaca4 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 11 Sep 2020 13:37:51 -0500 Subject: [PATCH 602/653] Update yt/data_objects/selection_objects/base_objects.py MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Co-authored-by: Clément Robert --- yt/data_objects/selection_objects/base_objects.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/base_objects.py index d53c07a3ed4..1ed24d1ed9a 100644 --- a/yt/data_objects/selection_objects/base_objects.py +++ b/yt/data_objects/selection_objects/base_objects.py @@ -265,10 +265,10 @@ def _generate_fields(self, fields_to_generate): fd = self.ds.arr(fd, "") if fi.units != "": raise YTFieldUnitError(fi, fd.units) - except UnitConversionError: - raise YTFieldUnitError(fi, fd.units) - except UnitParseError: - raise YTFieldUnitParseError(fi) + except UnitConversionError as e: + raise YTFieldUnitError(fi, fd.units) from e + except UnitParseError as e: + raise YTFieldUnitParseError(fi) from e self.field_data[field] = fd except GenerationInProgress as gip: for f in gip.fields: From 0ce690a6d5473f1851aa88fc896b0a36a9fe6c79 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 11 Sep 2020 13:51:53 -0500 Subject: [PATCH 603/653] Rename base_objects to data_selection_objects --- 
yt/visualization/volume_rendering/utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 95a5a06c09a..6a3498b318d 100644 --- a/yt/visualization/volume_rendering/utils.py +++ b/yt/visualization/volume_rendering/utils.py @@ -1,6 +1,6 @@ import numpy as np -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer3D +from yt.data_objects.selection_objects.data_selection_objects import YTSelectionContainer3D from yt.data_objects.static_output import Dataset from yt.utilities.lib import bounding_volume_hierarchy from yt.utilities.lib.image_samplers import ( From df92ebf9a6d4955f4c057d4b5719372a43066816 Mon Sep 17 00:00:00 2001 From: Matthew Turk Date: Fri, 11 Sep 2020 14:01:50 -0500 Subject: [PATCH 604/653] Fixing linting --- yt/data_objects/construction_data_containers.py | 2 +- yt/data_objects/grid_patch.py | 4 +++- yt/data_objects/octree_subset.py | 4 +++- yt/data_objects/particle_container.py | 4 +++- yt/data_objects/selection_objects/boolean_operations.py | 5 +++-- yt/data_objects/selection_objects/cut_region.py | 2 +- .../{base_objects.py => data_selection_objects.py} | 0 yt/data_objects/selection_objects/disk.py | 2 +- yt/data_objects/selection_objects/object_collection.py | 2 +- yt/data_objects/selection_objects/point.py | 2 +- yt/data_objects/selection_objects/ray.py | 2 +- yt/data_objects/selection_objects/region.py | 2 +- yt/data_objects/selection_objects/slices.py | 2 +- yt/data_objects/selection_objects/spheroids.py | 2 +- yt/data_objects/unstructured_mesh.py | 4 +++- yt/frontends/adaptahop/data_structures.py | 4 +++- yt/frontends/gadget_fof/data_structures.py | 4 +++- yt/frontends/halo_catalog/data_structures.py | 4 +++- yt/visualization/profile_plotter.py | 2 +- yt/visualization/volume_rendering/utils.py | 4 +++- 20 files changed, 37 insertions(+), 20 deletions(-) rename yt/data_objects/selection_objects/{base_objects.py => data_selection_objects.py} (100%) diff --git a/yt/data_objects/construction_data_containers.py b/yt/data_objects/construction_data_containers.py index 913417cc342..d8768977d38 100644 --- a/yt/data_objects/construction_data_containers.py +++ b/yt/data_objects/construction_data_containers.py @@ -11,7 +11,7 @@ from yt.config import ytcfg from yt.data_objects.field_data import YTFieldData -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer1D, YTSelectionContainer2D, YTSelectionContainer3D, diff --git a/yt/data_objects/grid_patch.py b/yt/data_objects/grid_patch.py index 9eca587caf5..89791e39fad 100644 --- a/yt/data_objects/grid_patch.py +++ b/yt/data_objects/grid_patch.py @@ -5,7 +5,9 @@ import yt.geometry.particle_deposit as particle_deposit from yt.config import ytcfg -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer, +) from yt.funcs import iterable from yt.geometry.selection_routines import convert_mask_to_indices from yt.units.yt_array import YTArray diff --git a/yt/data_objects/octree_subset.py b/yt/data_objects/octree_subset.py index 7c64671e164..a2c44c50b82 100644 --- a/yt/data_objects/octree_subset.py +++ b/yt/data_objects/octree_subset.py @@ -5,7 +5,9 @@ import yt.geometry.particle_deposit as particle_deposit import yt.geometry.particle_smooth as particle_smooth -from 
yt.data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer, +) from yt.funcs import mylog from yt.geometry.particle_oct_container import ParticleOctreeContainer from yt.units.dimensions import length diff --git a/yt/data_objects/particle_container.py b/yt/data_objects/particle_container.py index 4cd1b7cba33..92a094cecd1 100644 --- a/yt/data_objects/particle_container.py +++ b/yt/data_objects/particle_container.py @@ -1,7 +1,9 @@ import contextlib from yt.data_objects.data_containers import YTFieldData -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer, +) from yt.funcs import ensure_list from yt.utilities.exceptions import ( YTDataSelectorNotImplemented, diff --git a/yt/data_objects/selection_objects/boolean_operations.py b/yt/data_objects/selection_objects/boolean_operations.py index 8dae00feecb..24c4300bf36 100644 --- a/yt/data_objects/selection_objects/boolean_operations.py +++ b/yt/data_objects/selection_objects/boolean_operations.py @@ -1,7 +1,7 @@ import numpy as np import yt.geometry -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer3D, ) @@ -23,7 +23,8 @@ class YTBooleanContainer(YTSelectionContainer3D): ---------- op : string Can be AND, OR, XOR, NOT or NEG. - dobj1 : yt.data_objects.selection_objects.base_objects.YTSelectionContainer + dobj1 : yt.data_objects.selection_objects.data_selection_objects. + YTSelectionContainer The first selection object dobj2 : yt.data_objects.selection_objects.base_objects.YTSelectionContainer The second object diff --git a/yt/data_objects/selection_objects/cut_region.py b/yt/data_objects/selection_objects/cut_region.py index 679428fb20e..402d6c8b69c 100644 --- a/yt/data_objects/selection_objects/cut_region.py +++ b/yt/data_objects/selection_objects/cut_region.py @@ -1,6 +1,6 @@ import numpy as np -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer3D, ) diff --git a/yt/data_objects/selection_objects/base_objects.py b/yt/data_objects/selection_objects/data_selection_objects.py similarity index 100% rename from yt/data_objects/selection_objects/base_objects.py rename to yt/data_objects/selection_objects/data_selection_objects.py diff --git a/yt/data_objects/selection_objects/disk.py b/yt/data_objects/selection_objects/disk.py index b65648bcdcd..d52d957df40 100644 --- a/yt/data_objects/selection_objects/disk.py +++ b/yt/data_objects/selection_objects/disk.py @@ -1,6 +1,6 @@ import numpy as np -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer3D, ) diff --git a/yt/data_objects/selection_objects/object_collection.py b/yt/data_objects/selection_objects/object_collection.py index 1e3b98b1dbc..1f209a8958f 100644 --- a/yt/data_objects/selection_objects/object_collection.py +++ b/yt/data_objects/selection_objects/object_collection.py @@ -1,6 +1,6 @@ import numpy as np -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer3D, ) diff --git 
a/yt/data_objects/selection_objects/point.py b/yt/data_objects/selection_objects/point.py index 01c8830c846..6f86a309515 100644 --- a/yt/data_objects/selection_objects/point.py +++ b/yt/data_objects/selection_objects/point.py @@ -1,5 +1,5 @@ from yt import YTArray -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer0D, ) diff --git a/yt/data_objects/selection_objects/ray.py b/yt/data_objects/selection_objects/ray.py index 21cb9e360e8..c6292d513fa 100644 --- a/yt/data_objects/selection_objects/ray.py +++ b/yt/data_objects/selection_objects/ray.py @@ -2,7 +2,7 @@ from unyt import udot, unorm from yt import YTArray, YTQuantity -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer1D, ) diff --git a/yt/data_objects/selection_objects/region.py b/yt/data_objects/selection_objects/region.py index f2ac0272335..a049dfe817b 100644 --- a/yt/data_objects/selection_objects/region.py +++ b/yt/data_objects/selection_objects/region.py @@ -1,5 +1,5 @@ from yt import YTArray -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer3D, ) diff --git a/yt/data_objects/selection_objects/slices.py b/yt/data_objects/selection_objects/slices.py index 3ed68142f97..ebd9b7ffba0 100644 --- a/yt/data_objects/selection_objects/slices.py +++ b/yt/data_objects/selection_objects/slices.py @@ -1,7 +1,7 @@ import numpy as np from yt import iterable -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer2D, ) diff --git a/yt/data_objects/selection_objects/spheroids.py b/yt/data_objects/selection_objects/spheroids.py index 014ec012d24..6ffcc1d0a80 100644 --- a/yt/data_objects/selection_objects/spheroids.py +++ b/yt/data_objects/selection_objects/spheroids.py @@ -1,7 +1,7 @@ import numpy as np from yt import YTArray -from yt.data_objects.selection_objects.base_objects import ( +from yt.data_objects.selection_objects.data_selection_objects import ( YTSelectionContainer, YTSelectionContainer3D, ) diff --git a/yt/data_objects/unstructured_mesh.py b/yt/data_objects/unstructured_mesh.py index 6facdbd6f36..a32ff98ebd0 100644 --- a/yt/data_objects/unstructured_mesh.py +++ b/yt/data_objects/unstructured_mesh.py @@ -1,7 +1,9 @@ import numpy as np import yt.geometry.particle_deposit as particle_deposit -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer, +) from yt.funcs import mylog from yt.utilities.exceptions import YTParticleDepositionNotImplemented from yt.utilities.lib.mesh_utilities import fill_fcoords, fill_fwidths diff --git a/yt/frontends/adaptahop/data_structures.py b/yt/frontends/adaptahop/data_structures.py index 97444730d9e..0ba428512a4 100644 --- a/yt/frontends/adaptahop/data_structures.py +++ b/yt/frontends/adaptahop/data_structures.py @@ -13,7 +13,9 @@ import numpy as np -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer, +) from yt.data_objects.static_output import Dataset from 
yt.frontends.halo_catalog.data_structures import HaloCatalogFile from yt.funcs import setdefaultattr diff --git a/yt/frontends/gadget_fof/data_structures.py b/yt/frontends/gadget_fof/data_structures.py index 5f486d4b7b7..bc5f3f7d241 100644 --- a/yt/frontends/gadget_fof/data_structures.py +++ b/yt/frontends/gadget_fof/data_structures.py @@ -5,7 +5,9 @@ import numpy as np -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer, +) from yt.data_objects.static_output import ParticleDataset from yt.frontends.gadget.data_structures import _fix_unit_ordering from yt.frontends.gadget_fof.fields import GadgetFOFFieldInfo, GadgetFOFHaloFieldInfo diff --git a/yt/frontends/halo_catalog/data_structures.py b/yt/frontends/halo_catalog/data_structures.py index 87775a9656d..25e4d414fa6 100644 --- a/yt/frontends/halo_catalog/data_structures.py +++ b/yt/frontends/halo_catalog/data_structures.py @@ -5,7 +5,9 @@ import numpy as np -from yt.data_objects.selection_objects.base_objects import YTSelectionContainer +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer, +) from yt.data_objects.static_output import ( ParticleDataset, ParticleFile, diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index fc5a1461ac6..1e99a6da4b2 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -15,7 +15,7 @@ from yt.utilities.exceptions import YTNotInsideNotebook from yt.utilities.logger import ytLogger as mylog -from ..data_objects.selection_objects.base_objects import YTSelectionContainer +from ..data_objects.selection_objects.data_selection_objects import YTSelectionContainer from .base_plot_types import ImagePlotMPL, PlotMPL from .plot_container import ( ImagePlotContainer, diff --git a/yt/visualization/volume_rendering/utils.py b/yt/visualization/volume_rendering/utils.py index 6a3498b318d..ca847240857 100644 --- a/yt/visualization/volume_rendering/utils.py +++ b/yt/visualization/volume_rendering/utils.py @@ -1,6 +1,8 @@ import numpy as np -from yt.data_objects.selection_objects.data_selection_objects import YTSelectionContainer3D +from yt.data_objects.selection_objects.data_selection_objects import ( + YTSelectionContainer3D, +) from yt.data_objects.static_output import Dataset from yt.utilities.lib import bounding_volume_hierarchy from yt.utilities.lib.image_samplers import ( From 8d64a8b55308e6ca455cb77740ab308ec6e93e6c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 12 Sep 2020 14:52:07 +0200 Subject: [PATCH 605/653] fix docstrings in yt.loaders --- yt/loaders.py | 25 ++++++++++++------------- 1 file changed, 12 insertions(+), 13 deletions(-) diff --git a/yt/loaders.py b/yt/loaders.py index 0713f9d6be0..58a77a2e47d 100644 --- a/yt/loaders.py +++ b/yt/loaders.py @@ -220,7 +220,7 @@ def load_uniform_grid( Examples -------- - + >>> np.random.seed(int(0x4D3D3D3)) >>> bbox = np.array([[0., 1.0], [-1.5, 1.5], [1.0, 2.5]]) >>> arr = np.random.random((128, 128, 128)) >>> data = dict(density=arr) @@ -228,8 +228,8 @@ def load_uniform_grid( ... 
bbox=bbox, nprocs=12) >>> dd = ds.all_data() >>> dd['density'] - YTArray([ 0.87568064, 0.33686453, 0.70467189, ..., 0.70439916, - 0.97506269, 0.03047113]) g/cm**3 + unyt_array([0.76017901, 0.96855994, 0.49205428, ..., 0.78798258, + 0.97569432, 0.99453904], 'g/cm**3') """ from yt.frontends.stream.data_structures import ( StreamDataset, @@ -933,7 +933,6 @@ def load_octree( Example ------- - >>> import yt >>> import numpy as np >>> oct_mask = [8, 0, 0, 0, 0, 8, 0, 8, ... 0, 0, 0, 0, 0, 0, 0, 0, @@ -942,14 +941,14 @@ def load_octree( >>> >>> octree_mask = np.array(oct_mask, dtype=np.uint8) >>> quantities = {} - >>> quantities['gas', 'density'] = np.random.random((22, 1), dtype='f8') + >>> quantities['gas', 'density'] = np.random.random((22, 1)) >>> bbox = np.array([[-10., 10.], [-10., 10.], [-10., 10.]]) >>> - >>> ds = yt.load_octree(octree_mask=octree_mask, - ... data=quantities, - ... bbox=bbox, - ... over_refine_factor=0, - ... partial_coverage=0) + >>> ds = load_octree(octree_mask=octree_mask, + ... data=quantities, + ... bbox=bbox, + ... over_refine_factor=0, + ... partial_coverage=0) """ from yt.frontends.stream.data_structures import ( @@ -1133,9 +1132,9 @@ def load_unstructured_mesh( ... [0.0, 1.0, 2.0, 3.0]]) ... } >>> - >>> ds = yt.load_unstructured_mesh(connectivity, coordinates, - ... elem_data=elem_data, - ... node_data=node_data) + >>> ds = load_unstructured_mesh(connectivity, coordinates, + ... elem_data=elem_data, + ... node_data=node_data) """ from yt.frontends.exodus_ii.util import get_num_pseudo_dims from yt.frontends.stream.data_structures import ( From 1778f3b446dd119dae9cc7986af46dbaaeafddfb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 12 Sep 2020 15:24:16 +0200 Subject: [PATCH 606/653] doctest: fix errors in yt.funcs docstrings --- yt/funcs.py | 45 +++++++++++++++++++++++++-------------------- 1 file changed, 25 insertions(+), 20 deletions(-) diff --git a/yt/funcs.py b/yt/funcs.py index 0eb68de22b4..335e50e700e 100644 --- a/yt/funcs.py +++ b/yt/funcs.py @@ -880,8 +880,14 @@ def parallel_profile(prefix): Examples -------- + >>> from yt import PhasePlot + >>> from yt.testing import fake_random_ds + ... + >>> fields = ('density', 'temperature', 'cell_mass') + >>> units = ('g/cm**3', 'K', 'g') + >>> ds = fake_random_ds(16, fields=fields, units=units) >>> with parallel_profile('my_profile'): - ... yt.PhasePlot(ds.all_data(), 'density', 'temperature', 'cell_mass') + ... plot = PhasePlot(ds.all_data(), *fields) """ import cProfile @@ -925,15 +931,13 @@ def get_image_suffix(name): def get_output_filename(name, keyword, suffix): r"""Return an appropriate filename for output. - With a name provided by the user, this will decide how to - appropriately name the output file by the following rules: + With a name provided by the user, this will decide how to appropriately name the + output file by the following rules: - 1. if name is None, the filename will be the keyword plus - the suffix. - 2. if name ends with "/", assume name is a directory and - the file will be named name/(keyword+suffix). If the - directory does not exist, first try to create it and - raise an exception if an error occurs. + 1. if name is None, the filename will be the keyword plus the suffix. + 2. if name ends with "/" (resp "\" on Windows), assume name is a directory and the + file will be named name/(keyword+suffix). If the directory does not exist, first + try to create it and raise an exception if an error occurs. 3. if name does not end in the suffix, add the suffix. 
Parameters @@ -949,18 +953,18 @@ def get_output_filename(name, keyword, suffix): Examples -------- - >>> print(get_output_filename(None, "Projection_x", ".png")) - Projection_x.png - >>> print(get_output_filename("my_file", "Projection_x", ".png")) - my_file.png - >>> print(get_output_filename("my_file/", "Projection_x", ".png")) - my_file/Projection_x.png + >>> get_output_filename(None, "Projection_x", ".png") + 'Projection_x.png' + >>> get_output_filename("my_file", "Projection_x", ".png") + 'my_file.png' + >>> get_output_filename("my_dir/", "Projection_x", ".png") + 'my_dir/Projection_x.png' """ if name is None: name = keyword name = os.path.expanduser(name) - if name[-1] == os.sep and not os.path.isdir(name): + if name.endswith(os.sep) and not os.path.isdir(name): ensure_dir(name) if os.path.isdir(name): name = os.path.join(name, keyword) @@ -1043,6 +1047,7 @@ def memory_checker(interval=15, dest=None): ... arr = np.zeros(1024*1024*1024, dtype="float64") ... time.sleep(15) ... del arr + MEMORY: -1.000e+00 gb """ import threading @@ -1178,10 +1183,10 @@ def get_hash(infile, algorithm="md5", BLOCKSIZE=65536): Examples -------- - >>> import yt.funcs as funcs - >>> funcs.get_hash('/path/to/test.png') - 'd38da04859093d430fa4084fd605de60' - + >>> from tempfile import NamedTemporaryFile + >>> with NamedTemporaryFile() as file: + ... get_hash(file.name) + 'd41d8cd98f00b204e9800998ecf8427e' """ import hashlib From f26d8f11e3a18add830538afbfdd05d4e0bc5a96 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 13 Sep 2020 14:51:00 +0200 Subject: [PATCH 607/653] cleanup: remove deprecated particle_position_relative field (dep: yt 3.5.0) --- yt/fields/particle_fields.py | 29 +++++++---------------------- 1 file changed, 7 insertions(+), 22 deletions(-) diff --git a/yt/fields/particle_fields.py b/yt/fields/particle_fields.py index 432e464a0c9..3fb015b4631 100644 --- a/yt/fields/particle_fields.py +++ b/yt/fields/particle_fields.py @@ -420,28 +420,13 @@ def _relative_particle_position(field, data): field_names = [(ptype, f"particle_position_{ax}") for ax in "xyz"] return obtain_position_vector(data, field_names=field_names).T - def _particle_position_relative(field, data): - if not isinstance(data, FieldDetector): - issue_deprecation_warning( - "The 'particle_position_relative' field has been deprecated in " - + "favor of 'relative_particle_position'." 
- ) - if isinstance(field.name, tuple): - return data[field.name[0], "relative_particle_position"] - else: - return data["relative_particle_position"] - - for name, func in zip( - ["particle_position_relative", "relative_particle_position"], - [_particle_position_relative, _relative_particle_position], - ): - registry.add_field( - (ptype, name), - sampling_type="particle", - function=func, - units=unit_system["length"], - validators=[ValidateParameter("normal"), ValidateParameter("center")], - ) + registry.add_field( + (ptype, "relative_particle_position"), + sampling_type="particle", + function=_relative_particle_position, + units=unit_system["length"], + validators=[ValidateParameter("normal"), ValidateParameter("center")], + ) def _relative_particle_velocity(field, data): """The vector particle velocities in an arbitrary coordinate system From f2347f7ce8f48eb32090a635d2f38be83e225b13 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sun, 13 Sep 2020 15:57:49 +0200 Subject: [PATCH 608/653] cleanup: remove deprecated particle_velocity_relative field (dep: yt 3.5.0) --- yt/fields/particle_fields.py | 30 +++++++----------------------- 1 file changed, 7 insertions(+), 23 deletions(-) diff --git a/yt/fields/particle_fields.py b/yt/fields/particle_fields.py index 3fb015b4631..4157741203b 100644 --- a/yt/fields/particle_fields.py +++ b/yt/fields/particle_fields.py @@ -1,7 +1,6 @@ import numpy as np from yt.fields.derived_field import ValidateParameter, ValidateSpatial -from yt.fields.field_detector import FieldDetector from yt.funcs import issue_deprecation_warning from yt.units.yt_array import uconcatenate, ucross from yt.utilities.lib.misc_utilities import ( @@ -439,28 +438,13 @@ def _relative_particle_velocity(field, data): field_names = [(ptype, f"particle_velocity_{ax}") for ax in "xyz"] return obtain_relative_velocity_vector(data, field_names=field_names).T - def _particle_velocity_relative(field, data): - if not isinstance(data, FieldDetector): - issue_deprecation_warning( - "The 'particle_velocity_relative' field has been deprecated in " - + "favor of 'relative_particle_velocity'." 
- ) - if isinstance(field.name, tuple): - return data[field.name[0], "relative_particle_velocity"] - else: - return data["relative_particle_velocity"] - - for name, func in zip( - ["particle_velocity_relative", "relative_particle_velocity"], - [_particle_velocity_relative, _relative_particle_velocity], - ): - registry.add_field( - (ptype, name), - sampling_type="particle", - function=func, - units=unit_system["velocity"], - validators=[ValidateParameter("normal"), ValidateParameter("center")], - ) + registry.add_field( + (ptype, "relative_particle_velocity"), + sampling_type="particle", + function=_relative_particle_velocity, + units=unit_system["velocity"], + validators=[ValidateParameter("normal"), ValidateParameter("center")], + ) def _get_coord_funcs_relative(axi, _ptype): def _particle_pos_rel(field, data): From b04bfc6b1535ced91e849c9cb10d6bb534c6453f Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 13 Sep 2020 19:49:10 -0500 Subject: [PATCH 609/653] Add reqs that needs to be installed before test_requirements.txt --- tests/test_prerequirements.txt | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 tests/test_prerequirements.txt diff --git a/tests/test_prerequirements.txt b/tests/test_prerequirements.txt new file mode 100644 index 00000000000..f6b48e58564 --- /dev/null +++ b/tests/test_prerequirements.txt @@ -0,0 +1,3 @@ +# We need this file mostly because of Cartopy.. +numpy==1.17.5 +cython==0.29.14 From 037a92b203fee3a6de5e6b3e6f3bf1d25f9d1231 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 13 Sep 2020 21:25:29 -0500 Subject: [PATCH 610/653] Use reasonable values for testing zlim --- yt/visualization/tests/test_particle_plot.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/visualization/tests/test_particle_plot.py b/yt/visualization/tests/test_particle_plot.py index 03a4a1e4f81..d4ea788cc9b 100644 --- a/yt/visualization/tests/test_particle_plot.py +++ b/yt/visualization/tests/test_particle_plot.py @@ -41,8 +41,8 @@ def setup(): ] PROJ_ATTR_ARGS["set_log"] = [(("particle_mass", False), {})] PROJ_ATTR_ARGS["set_zlim"] = [ - (("particle_mass", 1e-25, 1e-23), {}), - (("particle_mass", 1e-25, None), {"dynamic_range": 4}), + (("particle_mass", 1e39, 1e42), {}), + (("particle_mass", 1e39, None), {"dynamic_range": 4}), ] PHASE_ATTR_ARGS = { From 7a21195230d31c32508e161506b3fc86e40180c2 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Mon, 14 Sep 2020 13:58:25 -0500 Subject: [PATCH 611/653] Downgrade mpl until PR#2902 is merged --- tests/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index 0512a8ec5ec..e697f5a3c14 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -5,7 +5,7 @@ fastcache==1.0.2 glueviz==0.13.3 h5py==2.10.0 ipython==7.6.1 -matplotlib==3.3.0 +matplotlib==3.1.3 mock nose-timer==1.0.0 nose==1.3.7 From a22f02bf5fe777a02945a3237e987f2bd2bdf4e2 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Mon, 14 Sep 2020 14:56:57 -0500 Subject: [PATCH 612/653] Update answers --- answer-store | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/answer-store b/answer-store index 414c76d4ac5..bb70887d146 160000 --- a/answer-store +++ b/answer-store @@ -1 +1 @@ -Subproject commit 414c76d4ac5679b2706be11feb6d05ce31996ff9 +Subproject commit bb70887d1460d9effc67e2135f89ac02d0132d58 From 4f01145bdcb295dccf0c7dc16e33685eb168d6ab Mon Sep 17 00:00:00 
2001 From: "Kacper Kowalik (Xarthisius)" Date: Sun, 13 Sep 2020 21:26:35 -0500 Subject: [PATCH 613/653] Bump answer tests (mpl>3.3.0) --- tests/tests.yaml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index e528197c192..b858d6e2c11 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -86,7 +86,7 @@ answer_tests: - yt/frontends/owls/tests/test_outputs.py:test_snapshot_033 - yt/frontends/owls/tests/test_outputs.py:test_OWLS_particlefilter - local_pw_030: # PR 2735 + local_pw_031: # PR 2902 - yt/visualization/tests/test_plotwindow.py:test_attributes - yt/visualization/tests/test_plotwindow.py:test_attributes_wt - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers @@ -162,7 +162,7 @@ answer_tests: local_axialpix_006: - yt/geometry/coordinates/tests/test_axial_pixelization.py:test_axial_pixelization - local_cylindrical_background_002: + local_cylindrical_background_003: # PR 2902 - yt/geometry/coordinates/tests/test_cylindrical_coordinates.py:test_noise_plots #local_particle_trajectory_001: From 396619236426d8012b00429c8f339a73daa35d5c Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Mon, 14 Sep 2020 23:27:15 -0500 Subject: [PATCH 614/653] Use tests/test_prerequirements.txt for travis too --- .travis.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.travis.yml b/.travis.yml index 2e7540231af..65e0d82b1fc 100644 --- a/.travis.yml +++ b/.travis.yml @@ -64,7 +64,7 @@ install: # pyproject.toml in cartopy. # These versions are pinned, so we will need to update/remove them when # the hack is no longer necessary. - $PIP install numpy==1.18.1 cython==0.29.14 + $PIP install -r tests/test_prerequirements.txt CFLAGS="$CFLAGS -DACCEPT_USE_OF_DEPRECATED_PROJ_API_H" $PIP install -r tests/test_requirements.txt fi $PIP install -e . 
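For context on the set_zlim test arguments changed in PATCH 610 above: the new bounds are plot limits for the deposited particle_mass field rather than density-like values. Below is a minimal, illustrative sketch of the calls those attribute tests exercise; the fake_particle_ds helper, the plot class, and the axis/field choices are assumptions for demonstration only, while the zlim arguments themselves are the ones the patch introduces.

>>> from yt.testing import fake_particle_ds
>>> from yt.visualization.particle_plots import ParticleProjectionPlot
>>> ds = fake_particle_ds()
>>> p = ParticleProjectionPlot(ds, "z", ["particle_mass"])
>>> # fixed lower and upper bounds for the particle_mass colorbar
>>> p.set_zlim("particle_mass", 1e39, 1e42)
>>> # or a lower bound plus an upper bound derived from a dynamic range
>>> p.set_zlim("particle_mass", 1e39, None, dynamic_range=4)
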
From a3f2d39d5a60180bb26492ff34712311c8a106fc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 15 Sep 2020 20:57:03 +0200 Subject: [PATCH 615/653] hotfix: make Dataset an abstract class again --- yt/data_objects/static_output.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index 6da6b0bf295..b44d608ccea 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -131,7 +131,7 @@ def ireq(self, value): return ireq -class Dataset: +class Dataset(abc.ABC): default_fluid_type = "gas" default_field = ("gas", "density") From 8ac607ca2f569a6b94b4d4e3ea8b8a7de1d21f0a Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Tue, 15 Sep 2020 18:53:43 -0500 Subject: [PATCH 616/653] Bump mpl to 3.3 for tests --- tests/test_requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/test_requirements.txt b/tests/test_requirements.txt index e697f5a3c14..02ac3c2b542 100644 --- a/tests/test_requirements.txt +++ b/tests/test_requirements.txt @@ -5,7 +5,7 @@ fastcache==1.0.2 glueviz==0.13.3 h5py==2.10.0 ipython==7.6.1 -matplotlib==3.1.3 +matplotlib~=3.3 mock nose-timer==1.0.0 nose==1.3.7 From f704f8887afbdfe74cc0a4574b3c2216c0de84ea Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Tue, 15 Sep 2020 20:15:35 -0500 Subject: [PATCH 617/653] Bump travis answers --- answer-store | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/answer-store b/answer-store index bb70887d146..d4cf99b7ca1 160000 --- a/answer-store +++ b/answer-store @@ -1 +1 @@ -Subproject commit bb70887d1460d9effc67e2135f89ac02d0132d58 +Subproject commit d4cf99b7ca1dcb0d83e3ad0ca5ffc020b059f3ed From 1662d857f2895ffc59e734208e6c538c5c13b1bf Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 3 Jul 2020 17:38:41 +0200 Subject: [PATCH 618/653] tests: add init tests for dataseries (from pathlike objects, lists, and lists of pathlike) --- yt/data_objects/tests/test_time_series.py | 36 ++++++++++++++++++++++- 1 file changed, 35 insertions(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_time_series.py b/yt/data_objects/tests/test_time_series.py index e5dabff57bf..8e54f80b6d8 100644 --- a/yt/data_objects/tests/test_time_series.py +++ b/yt/data_objects/tests/test_time_series.py @@ -2,8 +2,9 @@ import tempfile from pathlib import Path -from yt.data_objects.time_series import get_filenames_from_glob_pattern +from yt.data_objects.time_series import DatasetSeries, get_filenames_from_glob_pattern from yt.testing import assert_raises +from yt.utilities.exceptions import YTOutputNotIdentified def test_pattern_expansion(): @@ -25,3 +26,36 @@ def test_no_match_pattern(): with tempfile.TemporaryDirectory() as tmpdir: pattern = os.path.join(tmpdir, "fake_data_file_*") assert_raises(OSError, get_filenames_from_glob_pattern, pattern) + +def test_init_fake_dataseries(): + + file_list = ["fake_data_file_{}".format(str(i).zfill(4)) for i in range(10)] + with tempfile.TemporaryDirectory() as tmpdir: + pfile_list = [Path(tmpdir) / file for file in file_list] + sfile_list = [os.path.join(tmpdir, f) for f in file_list] + for file in pfile_list: + file.touch() + pattern = os.path.join(tmpdir, "fake_data_file_*") + + # init from str pattern + ts = DatasetSeries(pattern) + assert ts._pre_outputs == sfile_list + + # init from Path pattern + ppattern = Path(pattern) + ts = DatasetSeries(ppattern) + assert ts._pre_outputs == sfile_list 
+ + # init form str list + ts = DatasetSeries(sfile_list) + assert ts._pre_outputs == sfile_list + + # init form Path list + ts = DatasetSeries(pfile_list) + assert ts._pre_outputs == pfile_list + + # rejected input type (str repr of a list) "[file1, file2, ...]" + assert_raises(OSError, DatasetSeries, str(file_list)) + + # finally, check that ts[0] fails to actually load + assert_raises(YTOutputNotIdentified, ts.__getitem__, 0) From 44c6e3c7c39c9925c19b8d9999dfcb7f63d00741 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 3 Jul 2020 20:53:38 +0200 Subject: [PATCH 619/653] try instead of isinstance(str) --- yt/data_objects/time_series.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 60bcfb4d7b2..5bc6a57dced 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -162,8 +162,10 @@ def __init_subclass__(cls, *args, **kwargs): mylog.debug("Registering simulation: %s as %s", code_name, cls) def __new__(cls, outputs, *args, **kwargs): - if isinstance(outputs, str): + try: outputs = get_filenames_from_glob_pattern(outputs) + except TypeError: + pass ret = super(DatasetSeries, cls).__new__(cls) try: ret._pre_outputs = outputs[:] @@ -202,11 +204,11 @@ def _null(x): def __iter__(self): # We can make this fancier, but this works for o in self._pre_outputs: - if isinstance(o, str): + try: ds = self._load(o, **self.kwargs) self._setup_function(ds) yield ds - else: + except TypeError: yield o def __getitem__(self, key): @@ -218,9 +220,11 @@ def __getitem__(self, key): self._pre_outputs[key], parallel=self.parallel, **self.kwargs ) o = self._pre_outputs[key] - if isinstance(o, str): + try: o = self._load(o, **self.kwargs) self._setup_function(o) + except TypeError: + pass return o def __len__(self): From a012176f259149c70267be5cd036d584e7693f6e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Fri, 3 Jul 2020 18:19:07 +0200 Subject: [PATCH 620/653] doc: complete docstring --- yt/data_objects/time_series.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 5bc6a57dced..d0421143775 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -113,10 +113,11 @@ class DatasetSeries: Parameters ---------- - outputs : list or pattern + outputs : list of filenames, or pattern A list of filenames, for instance ["DD0001/DD0001", "DD0002/DD0002"], or a glob pattern (i.e. containing wildcards '[]?!*') such as "DD*/DD*.index". In the latter case, results are sorted automatically. + Filenames and patterns can be of type str, os.Patlike or bytes. parallel : True, False or int This parameter governs the behavior when .piter() is called on the resultant DatasetSeries object. 
If this is set to False, the time From f86c01737c59a998adb8096e958aee2cd345cdf7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 07:51:20 +0200 Subject: [PATCH 621/653] refactor: make get_filenames_from_glob_pattern a private static method of class DatasetSeries --- yt/data_objects/tests/test_time_series.py | 9 +++--- yt/data_objects/time_series.py | 34 +++++++++++------------ 2 files changed, 22 insertions(+), 21 deletions(-) diff --git a/yt/data_objects/tests/test_time_series.py b/yt/data_objects/tests/test_time_series.py index 8e54f80b6d8..ae181e18cfc 100644 --- a/yt/data_objects/tests/test_time_series.py +++ b/yt/data_objects/tests/test_time_series.py @@ -2,7 +2,7 @@ import tempfile from pathlib import Path -from yt.data_objects.time_series import DatasetSeries, get_filenames_from_glob_pattern +from yt.data_objects.time_series import DatasetSeries from yt.testing import assert_raises from yt.utilities.exceptions import YTOutputNotIdentified @@ -15,17 +15,18 @@ def test_pattern_expansion(): (Path(tmpdir) / file).touch() pattern = os.path.join(tmpdir, "fake_data_file_*") - found = get_filenames_from_glob_pattern(pattern) + found = DatasetSeries._get_filenames_from_glob_pattern(pattern) assert found == [os.path.join(tmpdir, file) for file in file_list] - found2 = get_filenames_from_glob_pattern(Path(pattern)) + found2 = DatasetSeries._get_filenames_from_glob_pattern(Path(pattern)) assert found2 == found def test_no_match_pattern(): with tempfile.TemporaryDirectory() as tmpdir: pattern = os.path.join(tmpdir, "fake_data_file_*") - assert_raises(OSError, get_filenames_from_glob_pattern, pattern) + assert_raises(OSError, DatasetSeries._get_filenames_from_glob_pattern, pattern) + def test_init_fake_dataseries(): diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index d0421143775..7ccd9c4236b 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -55,22 +55,6 @@ def _eval(params, ds): return cls -def get_filenames_from_glob_pattern(outputs): - """ - Helper function to DatasetSeries.__new__ - handle a special case where "outputs" is assumed to be really a pattern string - """ - pattern = outputs - epattern = os.path.expanduser(pattern) - data_dir = ytcfg.get("yt", "test_data_dir") - # if not match if found from the current work dir, - # we try to match the pattern from the test data dir - file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern)) - if not file_list: - raise OSError(f"No match found for pattern : {pattern}") - return sorted(file_list) - - attrs = ( "refine_by", "dimensionality", @@ -164,7 +148,7 @@ def __init_subclass__(cls, *args, **kwargs): def __new__(cls, outputs, *args, **kwargs): try: - outputs = get_filenames_from_glob_pattern(outputs) + outputs = cls._get_filenames_from_glob_pattern(outputs) except TypeError: pass ret = super(DatasetSeries, cls).__new__(cls) @@ -202,6 +186,22 @@ def _null(x): self.parallel = parallel self.kwargs = kwargs + @staticmethod + def _get_filenames_from_glob_pattern(outputs): + """ + Helper function to DatasetSeries.__new__ + handle a special case where "outputs" is assumed to be really a pattern string + """ + pattern = outputs + epattern = os.path.expanduser(pattern) + data_dir = ytcfg.get("yt", "test_data_dir") + # if not match if found from the current work dir, + # we try to match the pattern from the test data dir + file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern)) + if not file_list: + raise 
OSError(f"No match found for pattern : {pattern}") + return sorted(file_list) + def __iter__(self): # We can make this fancier, but this works for o in self._pre_outputs: From 98cdc82996676d3c149cf64d66cc71864f1e7662 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 4 Jul 2020 09:20:24 +0200 Subject: [PATCH 622/653] remove useless try block --- yt/data_objects/time_series.py | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 7ccd9c4236b..66a2465e68a 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -152,10 +152,7 @@ def __new__(cls, outputs, *args, **kwargs): except TypeError: pass ret = super(DatasetSeries, cls).__new__(cls) - try: - ret._pre_outputs = outputs[:] - except TypeError as e: - raise YTUnidentifiedDataType(outputs, *args, **kwargs) from e + ret._pre_outputs = outputs[:] return ret def __init__( From faf72d8a08c58bf47f653326de5a1dfccb68f26c Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 13 Aug 2020 16:14:55 +0200 Subject: [PATCH 623/653] FileNotFoundError is better than OSError here --- yt/data_objects/tests/test_time_series.py | 6 ++++-- yt/data_objects/time_series.py | 2 +- 2 files changed, 5 insertions(+), 3 deletions(-) diff --git a/yt/data_objects/tests/test_time_series.py b/yt/data_objects/tests/test_time_series.py index ae181e18cfc..0038f38601f 100644 --- a/yt/data_objects/tests/test_time_series.py +++ b/yt/data_objects/tests/test_time_series.py @@ -25,7 +25,9 @@ def test_pattern_expansion(): def test_no_match_pattern(): with tempfile.TemporaryDirectory() as tmpdir: pattern = os.path.join(tmpdir, "fake_data_file_*") - assert_raises(OSError, DatasetSeries._get_filenames_from_glob_pattern, pattern) + assert_raises( + FileNotFoundError, DatasetSeries._get_filenames_from_glob_pattern, pattern + ) def test_init_fake_dataseries(): @@ -56,7 +58,7 @@ def test_init_fake_dataseries(): assert ts._pre_outputs == pfile_list # rejected input type (str repr of a list) "[file1, file2, ...]" - assert_raises(OSError, DatasetSeries, str(file_list)) + assert_raises(FileNotFoundError, DatasetSeries, str(file_list)) # finally, check that ts[0] fails to actually load assert_raises(YTOutputNotIdentified, ts.__getitem__, 0) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 66a2465e68a..230e6a034fd 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -196,7 +196,7 @@ def _get_filenames_from_glob_pattern(outputs): # we try to match the pattern from the test data dir file_list = glob.glob(epattern) or glob.glob(os.path.join(data_dir, epattern)) if not file_list: - raise OSError(f"No match found for pattern : {pattern}") + raise FileNotFoundError(f"No match found for pattern : {pattern}") return sorted(file_list) def __iter__(self): From 881f446df6ddbcb6bf24bed540e67005de567d4d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 13 Aug 2020 16:37:29 +0200 Subject: [PATCH 624/653] revert leftover change from previous refactor --- yt/data_objects/time_series.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index 230e6a034fd..bcb90d56089 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -537,7 +537,7 @@ def __init__(self, parameter_filename, find_outputs=False): """ if not os.path.exists(parameter_filename): - raise 
OSError(parameter_filename) + raise FileNotFoundError(parameter_filename) self.parameter_filename = parameter_filename self.basename = os.path.basename(parameter_filename) self.directory = os.path.dirname(parameter_filename) From 61e87a46e37452848555cc52c4f54040cda0ad68 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 13 Aug 2020 16:41:28 +0200 Subject: [PATCH 625/653] flynting --- yt/data_objects/tests/test_time_series.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_time_series.py b/yt/data_objects/tests/test_time_series.py index 0038f38601f..f44390d4310 100644 --- a/yt/data_objects/tests/test_time_series.py +++ b/yt/data_objects/tests/test_time_series.py @@ -32,7 +32,7 @@ def test_no_match_pattern(): def test_init_fake_dataseries(): - file_list = ["fake_data_file_{}".format(str(i).zfill(4)) for i in range(10)] + file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)] with tempfile.TemporaryDirectory() as tmpdir: pfile_list = [Path(tmpdir) / file for file in file_list] sfile_list = [os.path.join(tmpdir, f) for f in file_list] From 6b95e423afcc72322d8b16b862b9bb54c4f8b4a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 11:26:36 +0200 Subject: [PATCH 626/653] update exception catching in test following updates on master --- yt/data_objects/tests/test_time_series.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/tests/test_time_series.py b/yt/data_objects/tests/test_time_series.py index f44390d4310..80dbd06615b 100644 --- a/yt/data_objects/tests/test_time_series.py +++ b/yt/data_objects/tests/test_time_series.py @@ -4,7 +4,7 @@ from yt.data_objects.time_series import DatasetSeries from yt.testing import assert_raises -from yt.utilities.exceptions import YTOutputNotIdentified +from yt.utilities.exceptions import YTUnidentifiedDataType def test_pattern_expansion(): @@ -61,4 +61,4 @@ def test_init_fake_dataseries(): assert_raises(FileNotFoundError, DatasetSeries, str(file_list)) # finally, check that ts[0] fails to actually load - assert_raises(YTOutputNotIdentified, ts.__getitem__, 0) + assert_raises(YTUnidentifiedDataType, ts.__getitem__, 0) From 516a8a5f246c762b2dfe3fca2b7dfeb37cc0c462 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 11:27:22 +0200 Subject: [PATCH 627/653] cleanup unused import --- yt/data_objects/time_series.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index bcb90d56089..f64ca455009 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -12,7 +12,7 @@ from yt.data_objects.particle_trajectories import ParticleTrajectories from yt.funcs import ensure_list, issue_deprecation_warning, iterable, mylog from yt.units.yt_array import YTArray, YTQuantity -from yt.utilities.exceptions import YTException, YTUnidentifiedDataType +from yt.utilities.exceptions import YTException from yt.utilities.object_registries import ( analysis_task_registry, data_object_registry, From 404caa8e6be428a526548191385ebc3564e75251 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Tue, 11 Aug 2020 21:18:07 +0200 Subject: [PATCH 628/653] Fix #2838 --- yt/geometry/grid_geometry_handler.py | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 1602ca40cb0..916de2ca7e5 100644 --- 
a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -462,7 +462,16 @@ def _mesh_sampling_particle_field(field, data): i, j, k = np.floor((pos - data.LeftEdge) / data.dds).astype("int64").T - return field_values[i, j, k] + # Make sure all particles are within the current grid, otherwise return nan + maxi, maxj, maxk = field_values.shape + + mask = (i < maxi) & (j < maxj) & (k < maxk) + mask &= (i >= 0) & (j >= 0) & (k >= 0) + + result = np.full_like(i, np.nan, dtype="float64") + result[mask] = field_values[i[mask], j[mask], k[mask]] + + return data.ds.arr(result, field_values.units) self.ds.add_field( (ptype, field_name), From 47f5376802d9a66e96506c5880dd24a6eab0af78 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 12 Aug 2020 14:25:47 +0200 Subject: [PATCH 629/653] Use better model for `full_like` --- yt/geometry/grid_geometry_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 916de2ca7e5..e7277903c3b 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -468,7 +468,7 @@ def _mesh_sampling_particle_field(field, data): mask = (i < maxi) & (j < maxj) & (k < maxk) mask &= (i >= 0) & (j >= 0) & (k >= 0) - result = np.full_like(i, np.nan, dtype="float64") + result = np.full_like(field_value, np.nan, dtype="float64") result[mask] = field_values[i[mask], j[mask], k[mask]] return data.ds.arr(result, field_values.units) From 3e713ca5ae5587e201ad93106a90295f4565fbae Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 16:52:53 +0200 Subject: [PATCH 630/653] fix an undefined variable --- yt/geometry/grid_geometry_handler.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index e7277903c3b..46e2f7324c2 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -468,7 +468,7 @@ def _mesh_sampling_particle_field(field, data): mask = (i < maxi) & (j < maxj) & (k < maxk) mask &= (i >= 0) & (j >= 0) & (k >= 0) - result = np.full_like(field_value, np.nan, dtype="float64") + result = np.full_like(field_values, np.nan, dtype="float64") result[mask] = field_values[i[mask], j[mask], k[mask]] return data.ds.arr(result, field_values.units) From 552ac9a9bb4cbb41ba026d6e0e4c8ed7619fe434 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 16 Sep 2020 11:54:46 +0200 Subject: [PATCH 631/653] Fix mesh sampling --- yt/geometry/grid_geometry_handler.py | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/yt/geometry/grid_geometry_handler.py b/yt/geometry/grid_geometry_handler.py index 46e2f7324c2..fd49f1e2149 100644 --- a/yt/geometry/grid_geometry_handler.py +++ b/yt/geometry/grid_geometry_handler.py @@ -468,8 +468,9 @@ def _mesh_sampling_particle_field(field, data): mask = (i < maxi) & (j < maxj) & (k < maxk) mask &= (i >= 0) & (j >= 0) & (k >= 0) - result = np.full_like(field_values, np.nan, dtype="float64") - result[mask] = field_values[i[mask], j[mask], k[mask]] + result = np.full(len(pos), np.nan, dtype="float64") + if result.shape[0] > 0: + result[mask] = field_values[i[mask], j[mask], k[mask]] return data.ds.arr(result, field_values.units) From 096d1c0dd25c3b07450347558b5477c34002d562 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 16 Sep 2020 11:03:03 +0200 Subject: [PATCH 632/653] use only pathlib instead of 
os.path in test file --- yt/data_objects/tests/test_time_series.py | 17 +++++++++-------- 1 file changed, 9 insertions(+), 8 deletions(-) diff --git a/yt/data_objects/tests/test_time_series.py b/yt/data_objects/tests/test_time_series.py index 80dbd06615b..dea5eeb9fbd 100644 --- a/yt/data_objects/tests/test_time_series.py +++ b/yt/data_objects/tests/test_time_series.py @@ -1,4 +1,3 @@ -import os import tempfile from pathlib import Path @@ -11,20 +10,22 @@ def test_pattern_expansion(): file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)] with tempfile.TemporaryDirectory() as tmpdir: + tmp_path = Path(tmpdir) for file in file_list: - (Path(tmpdir) / file).touch() + (tmp_path / file).touch() - pattern = os.path.join(tmpdir, "fake_data_file_*") + pattern = tmp_path / "fake_data_file_*" + expected = [str(tmp_path / file) for file in file_list] found = DatasetSeries._get_filenames_from_glob_pattern(pattern) - assert found == [os.path.join(tmpdir, file) for file in file_list] + assert found == expected found2 = DatasetSeries._get_filenames_from_glob_pattern(Path(pattern)) - assert found2 == found + assert found2 == expected def test_no_match_pattern(): with tempfile.TemporaryDirectory() as tmpdir: - pattern = os.path.join(tmpdir, "fake_data_file_*") + pattern = Path(tmpdir).joinpath("fake_data_file_*") assert_raises( FileNotFoundError, DatasetSeries._get_filenames_from_glob_pattern, pattern ) @@ -35,10 +36,10 @@ def test_init_fake_dataseries(): file_list = [f"fake_data_file_{str(i).zfill(4)}" for i in range(10)] with tempfile.TemporaryDirectory() as tmpdir: pfile_list = [Path(tmpdir) / file for file in file_list] - sfile_list = [os.path.join(tmpdir, f) for f in file_list] + sfile_list = [str(file) for file in pfile_list] for file in pfile_list: file.touch() - pattern = os.path.join(tmpdir, "fake_data_file_*") + pattern = Path(tmpdir) / "fake_data_file_*" # init from str pattern ts = DatasetSeries(pattern) From 975ca8a331980634cc07262bf2e1c4c86d7e66da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 16 Sep 2020 13:13:39 +0200 Subject: [PATCH 633/653] fix a typo in docstring Co-authored-by: Corentin Cadiou --- yt/data_objects/time_series.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/time_series.py b/yt/data_objects/time_series.py index f64ca455009..08e9ca748bd 100644 --- a/yt/data_objects/time_series.py +++ b/yt/data_objects/time_series.py @@ -101,7 +101,7 @@ class DatasetSeries: A list of filenames, for instance ["DD0001/DD0001", "DD0002/DD0002"], or a glob pattern (i.e. containing wildcards '[]?!*') such as "DD*/DD*.index". In the latter case, results are sorted automatically. - Filenames and patterns can be of type str, os.Patlike or bytes. + Filenames and patterns can be of type str, os.Pathlike or bytes. parallel : True, False or int This parameter governs the behavior when .piter() is called on the resultant DatasetSeries object. If this is set to False, the time From 85089f27044ea679cfbb15d47bdc71ce158c5859 Mon Sep 17 00:00:00 2001 From: Corentin Cadiou Date: Wed, 16 Sep 2020 15:30:47 +0100 Subject: [PATCH 634/653] Fixing line that's too long. 
--- yt/frontends/ramses/field_handlers.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/yt/frontends/ramses/field_handlers.py b/yt/frontends/ramses/field_handlers.py index 1c3ec43fece..4a165e86577 100644 --- a/yt/frontends/ramses/field_handlers.py +++ b/yt/frontends/ramses/field_handlers.py @@ -60,7 +60,8 @@ def setup_handler(self, domain): self.fname = full_path else: raise FileNotFoundError( - f"Could not find {self._file_type} file (type: {self.ftype}). Tried {full_path}" + f"Could not find {self._file_type} file (type: {self.ftype}). " + f"Tried {full_path}" ) if self.file_descriptor is not None: From d304664830e250664ce34c957e3d7628ee2dabee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 18 Aug 2020 09:30:41 +0200 Subject: [PATCH 635/653] expose shading argument in PhasePlot, set default to 'nearest' --- yt/visualization/particle_plots.py | 3 ++- yt/visualization/profile_plotter.py | 18 +++++++++++++----- 2 files changed, 15 insertions(+), 6 deletions(-) diff --git a/yt/visualization/particle_plots.py b/yt/visualization/particle_plots.py index 777ebf68020..3771b56752b 100644 --- a/yt/visualization/particle_plots.py +++ b/yt/visualization/particle_plots.py @@ -369,6 +369,7 @@ def __init__( deposition="ngp", fontsize=18, figure_size=8.0, + shading="nearest", ): # if no z_fields are passed in, use a constant color @@ -387,7 +388,7 @@ def __init__( ) type(self)._initialize_instance( - self, data_source, profile, fontsize, figure_size + self, data_source, profile, fontsize, figure_size, shading ) diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index 1e99a6da4b2..8bccb905097 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -937,6 +937,7 @@ def __init__( fractional=False, fontsize=18, figure_size=8.0, + shading="nearest", ): data_source = data_object_or_all_data(data_source) @@ -955,11 +956,13 @@ def __init__( ) type(self)._initialize_instance( - self, data_source, profile, fontsize, figure_size + self, data_source, profile, fontsize, figure_size, shading ) @classmethod - def _initialize_instance(cls, obj, data_source, profile, fontsize, figure_size): + def _initialize_instance( + cls, obj, data_source, profile, fontsize, figure_size, shading + ): obj.plot_title = {} obj.z_log = {} obj.z_title = {} @@ -971,6 +974,7 @@ def _initialize_instance(cls, obj, data_source, profile, fontsize, figure_size): obj._text_ypos = {} obj._text_kwargs = {} obj._profile = profile + obj._shading = shading obj._profile_valid = True obj._xlim = (None, None) obj._ylim = (None, None) @@ -1115,6 +1119,7 @@ def _setup_plots(self): fig, axes, cax, + shading=self._shading, ) self.plots[f]._toggle_axes(draw_axes) @@ -1182,7 +1187,7 @@ def _setup_plots(self): self._plot_valid = True @classmethod - def from_profile(cls, profile, fontsize=18, figure_size=8.0): + def from_profile(cls, profile, fontsize=18, figure_size=8.0, shading="nearest"): r""" Instantiate a PhasePlot object from a profile object created with :func:`~yt.data_objects.profiles.create_profile`. 
@@ -1215,7 +1220,7 @@ def from_profile(cls, profile, fontsize=18, figure_size=8.0): obj = cls.__new__(cls) data_source = profile.data_source return cls._initialize_instance( - obj, data_source, profile, fontsize, figure_size + obj, data_source, profile, fontsize, figure_size, shading ) def annotate_text(self, xpos=0.0, ypos=0.0, text=None, **text_kwargs): @@ -1607,12 +1612,13 @@ def __init__( figure, axes, cax, + shading="nearest", ): self._initfinished = False self._draw_colorbar = True self._draw_axes = True self._figure_size = figure_size - + self._shading = shading # Compute layout fontscale = float(fontsize) / 18.0 if fontscale < 1.0: @@ -1646,12 +1652,14 @@ def _init_image( norm = matplotlib.colors.Normalize(zlim[0], zlim[1]) self.image = None self.cb = None + self.image = self.axes.pcolormesh( np.array(x_data), np.array(y_data), np.array(image_data.T), norm=norm, cmap=cmap, + shading=self._shading, ) self.axes.set_xscale(x_scale) From 2a1bb67c564b105d64aeb0926e193081cc78acde Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 16 Sep 2020 16:18:40 +0200 Subject: [PATCH 636/653] add shading arg to docstrings --- yt/visualization/particle_plots.py | 5 +++++ yt/visualization/profile_plotter.py | 10 ++++++++++ 2 files changed, 15 insertions(+) diff --git a/yt/visualization/particle_plots.py b/yt/visualization/particle_plots.py index 3771b56752b..a917aa428a3 100644 --- a/yt/visualization/particle_plots.py +++ b/yt/visualization/particle_plots.py @@ -335,6 +335,11 @@ class ParticlePhasePlot(PhasePlot): figure_size : int Size in inches of the image. Default: 8 (8x8) + shading : str + This argument is directly passed down to matplotlib.axes.Axes.pcolormesh + see + https://matplotlib.org/3.3.1/gallery/images_contours_and_fields/pcolormesh_grids.html#sphx-glr-gallery-images-contours-and-fields-pcolormesh-grids-py # noqa + Default: 'nearest' Examples -------- diff --git a/yt/visualization/profile_plotter.py b/yt/visualization/profile_plotter.py index 8bccb905097..ce379770cdf 100644 --- a/yt/visualization/profile_plotter.py +++ b/yt/visualization/profile_plotter.py @@ -898,6 +898,11 @@ class PhasePlot(ImagePlotContainer): figure_size : int Size in inches of the image. Default: 8 (8x8) + shading : str + This argument is directly passed down to matplotlib.axes.Axes.pcolormesh + see + https://matplotlib.org/3.3.1/gallery/images_contours_and_fields/pcolormesh_grids.html#sphx-glr-gallery-images-contours-and-fields-pcolormesh-grids-py # noqa + Default: 'nearest' Examples -------- @@ -1200,6 +1205,11 @@ def from_profile(cls, profile, fontsize=18, figure_size=8.0, shading="nearest"): The fontsize to use, in points. figure_size : float The figure size to use, in inches. 
+ shading : str + This argument is directly passed down to matplotlib.axes.Axes.pcolormesh + see + https://matplotlib.org/3.3.1/gallery/images_contours_and_fields/pcolormesh_grids.html#sphx-glr-gallery-images-contours-and-fields-pcolormesh-grids-py # noqa + Default: 'nearest' Examples -------- From 93665f5f809379071bbfe422946835cb2799ad45 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 16 Sep 2020 17:51:09 +0200 Subject: [PATCH 637/653] bump jenkins answers --- tests/tests.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index 51d391ecb0a..2285669484e 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -86,7 +86,7 @@ answer_tests: - yt/frontends/owls/tests/test_outputs.py:test_snapshot_033 - yt/frontends/owls/tests/test_outputs.py:test_OWLS_particlefilter - local_pw_031: # PR 2902 + local_pw_032: # PR 2881 - yt/visualization/tests/test_plotwindow.py:test_attributes - yt/visualization/tests/test_plotwindow.py:test_attributes_wt - yt/visualization/tests/test_particle_plot.py:test_particle_projection_answers From 5b9abf996eed4ac2eb189f764642b261b073c8a7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 16 Sep 2020 18:02:51 +0200 Subject: [PATCH 638/653] bump answer-store --- answer-store | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/answer-store b/answer-store index d4cf99b7ca1..edfaa282dea 160000 --- a/answer-store +++ b/answer-store @@ -1 +1 @@ -Subproject commit d4cf99b7ca1dcb0d83e3ad0ca5ffc020b059f3ed +Subproject commit edfaa282dea7c2f4f028f0d77b93dbd84eb2104b From f3ec9ec686b546e58b21795d62f10f95a981d023 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Wed, 16 Sep 2020 11:28:13 -0500 Subject: [PATCH 639/653] Fix indexing issues in sph pixelization --- yt/utilities/lib/pixelization_routines.pyx | 86 ++++++++++------------ 1 file changed, 38 insertions(+), 48 deletions(-) diff --git a/yt/utilities/lib/pixelization_routines.pyx b/yt/utilities/lib/pixelization_routines.pyx index 314fe449937..464a95da2a7 100644 --- a/yt/utilities/lib/pixelization_routines.pyx +++ b/yt/utilities/lib/pixelization_routines.pyx @@ -997,7 +997,7 @@ def pixelize_sph_kernel_projection( cdef np.float64_t q_ij2, posx_diff, posy_diff, ih_j2 cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, px, py cdef np.float64_t period_x, period_y - cdef int index, i, j + cdef int index, i, j, ii, jj cdef np.float64_t[:] _weight_field cdef int xiter[2] cdef int yiter[2] @@ -1040,30 +1040,27 @@ def pixelize_sph_kernel_projection( xiter[1] = yiter[1] = 999 - px = posx[j] - py = posy[j] - if check_period == 1: - if px - hsml[j] < x_min: + if posx[j] - hsml[j] < x_min: xiter[1] = +1 xiterv[1] = period_x - elif px + hsml[j] > x_max: + elif posx[j] + hsml[j] > x_max: xiter[1] = -1 xiterv[1] = -period_x - if py - hsml[j] < y_min: + if posy[j] - hsml[j] < y_min: yiter[1] = +1 yiterv[1] = period_y - elif py + hsml[j] > y_max: + elif posy[j] + hsml[j] > y_max: yiter[1] = -1 yiterv[1] = -period_y - for xi in range(2): - if xiter[xi] == 999: continue - px += xiterv[xi] + for ii in range(2): + if xiter[ii] == 999: continue + px = posx[j] + xiterv[ii] if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue - for yi in range(2): - if yiter[yi] == 999: continue - py += yiterv[yi] + for jj in range(2): + if yiter[jj] == 999: continue + py = posy[j] + yiterv[jj] if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue # here we find the pixels which this particle contributes to @@ -1296,7 
+1293,7 @@ def pixelize_sph_kernel_slice( cdef np.int64_t xi, yi, x0, x1, y0, y1 cdef np.float64_t q_ij, posx_diff, posy_diff, ih_j cdef np.float64_t x, y, dx, dy, idx, idy, h_j2, h_j, px, py - cdef int index, i, j + cdef int index, i, j, ii, jj cdef np.float64_t period_x, period_y cdef int xiter[2] cdef int yiter[2] @@ -1331,30 +1328,27 @@ def pixelize_sph_kernel_slice( xiter[1] = yiter[1] = 999 - px = posx[j] - py = posy[j] - if check_period == 1: - if px - hsml[j] < x_min: + if posx[j] - hsml[j] < x_min: xiter[1] = +1 xiterv[1] = period_x - elif px + hsml[j] > x_max: + elif posx[j] + hsml[j] > x_max: xiter[1] = -1 xiterv[1] = -period_x - if py - hsml[j] < y_min: + if posy[j] - hsml[j] < y_min: yiter[1] = +1 yiterv[1] = period_y - elif py + hsml[j] > y_max: + elif posy[j] + hsml[j] > y_max: yiter[1] = -1 yiterv[1] = -period_y - for xi in range(2): - if xiter[xi] == 999: continue - px += xiterv[xi] + for ii in range(2): + if xiter[ii] == 999: continue + px = posx[j] + xiterv[ii] if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue - for yi in range(2): - if yiter[yi] == 999: continue - py += yiterv[yi] + for jj in range(2): + if yiter[jj] == 999: continue + py = posy[j] + yiterv[jj] if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue x0 = ( (px - hsml[j] - x_min) * idx) @@ -1417,7 +1411,7 @@ def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff, cdef np.int64_t xi, yi, zi, x0, x1, y0, y1, z0, z1 cdef np.float64_t q_ij, posx_diff, posy_diff, posz_diff, px, py, pz cdef np.float64_t x, y, z, dx, dy, dz, idx, idy, idz, h_j3, h_j2, h_j, ih_j - cdef int index, i, j, k + cdef int index, i, j, k, ii, jj, kk cdef np.float64_t period_x, period_y, period_z cdef int xiter[2] @@ -1462,41 +1456,37 @@ def pixelize_sph_kernel_arbitrary_grid(np.float64_t[:, :, :] buff, xiter[1] = yiter[1] = ziter[1] = 999 - px = posx[j] - py = posy[j] - pz = posz[j] - if check_period == 1: - if px - hsml[j] < x_min: + if posx[j] - hsml[j] < x_min: xiter[1] = +1 xiterv[1] = period_x - elif px + hsml[j] > x_max: + elif posx[j] + hsml[j] > x_max: xiter[1] = -1 xiterv[1] = -period_x - if py - hsml[j] < y_min: + if posy[j] - hsml[j] < y_min: yiter[1] = +1 yiterv[1] = period_y - elif py + hsml[j] > y_max: + elif posy[j] + hsml[j] > y_max: yiter[1] = -1 yiterv[1] = -period_y - if pz - hsml[j] < z_min: + if posz[j] - hsml[j] < z_min: ziter[1] = +1 ziterv[1] = period_z - elif pz + hsml[j] > z_max: + elif posz[j] + hsml[j] > z_max: ziter[1] = -1 ziterv[1] = -period_z - for xi in range(2): - if xiter[xi] == 999: continue - px += xiterv[xi] + for ii in range(2): + if xiter[ii] == 999: continue + px = posx[j] + xiterv[ii] if (px + hsml[j] < x_min) or (px - hsml[j] > x_max): continue - for yi in range(2): - if yiter[yi] == 999: continue - py += yiterv[yi] + for jj in range(2): + if yiter[jj] == 999: continue + py = posy[j] + yiterv[jj] if (py + hsml[j] < y_min) or (py - hsml[j] > y_max): continue - for zi in range(2): - if ziter[zi] == 999: continue - pz += ziterv[zi] + for kk in range(2): + if ziter[kk] == 999: continue + pz = posz[j] + ziterv[kk] if (pz + hsml[j] < z_min) or (pz - hsml[j] > z_max): continue x0 = ( (px - hsml[j] - x_min) * idx) From 45840f22d492d92c9b1fd22f79b9585bacc8ca1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 17 Sep 2020 08:11:14 +0200 Subject: [PATCH 640/653] ci: only test against latest python version on windows to reduce CI bottleneck --- appveyor.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/appveyor.yml b/appveyor.yml index 
db16560bcbf..e800f3d40df 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -7,7 +7,6 @@ environment: PYTHON: "C:\\Miniconda36-x64" matrix: - - PYTHON_VERSION: "3.6" - PYTHON_VERSION: "3.8" platform: From 4e932e174a39bc6712f84f8bb8544e896527a5d0 Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Thu, 17 Sep 2020 10:00:58 -0500 Subject: [PATCH 641/653] Visualize failed PixelizedParticleProjectionValuesTests --- yt/utilities/answer_testing/framework.py | 24 +++++++++++++++++++++++- 1 file changed, 23 insertions(+), 1 deletion(-) diff --git a/yt/utilities/answer_testing/framework.py b/yt/utilities/answer_testing/framework.py index 2267d0222db..1b3def4c487 100644 --- a/yt/utilities/answer_testing/framework.py +++ b/yt/utilities/answer_testing/framework.py @@ -36,6 +36,7 @@ from yt.utilities.exceptions import YTCloudError, YTNoAnswerNameSpecified, YTNoOldAnswer from yt.utilities.logger import disable_stream_logging from yt.visualization import ( + image_writer as image_writer, particle_plots as particle_plots, plot_window as pw, profile_plotter as profile_plotter, @@ -669,7 +670,11 @@ def compare(self, new_result, old_result): # weight_field does not have units, so we do not directly compare them if k == "weight_field_sum": continue - assert_allclose_units(new_result[k], old_result[k], 1e-10) + try: + assert_allclose_units(new_result[k], old_result[k], 1e-10) + except AssertionError: + dump_images(new_result[k], old_result[k]) + raise class PixelizedParticleProjectionValuesTest(PixelizedProjectionValuesTest): @@ -782,6 +787,23 @@ def compare(self, new_result, old_result): assert newc == oldc +def dump_images(new_result, old_result, decimals=10): + tmpfd, old_image = tempfile.mkstemp(suffix=".png") + os.close(tmpfd) + tmpfd, new_image = tempfile.mkstemp(suffix=".png") + os.close(tmpfd) + image_writer.write_projection(new_result, new_image) + image_writer.write_projection(old_result, old_image) + results = compare_images(old_image, new_image, 10 ** (-decimals)) + if results is not None: + tempfiles = [ + line.strip() for line in results.split("\n") if line.endswith(".png") + ] + for fn in tempfiles: + sys.stderr.write(f"\n[[ATTACHMENT|{fn}]]") + sys.stderr.write("\n") + + def compare_image_lists(new_result, old_result, decimals): fns = [] for _ in range(2): From 67719ef598439181e7f19e09812fee3ae3cd6aaf Mon Sep 17 00:00:00 2001 From: "Kacper Kowalik (Xarthisius)" Date: Thu, 17 Sep 2020 11:02:45 -0500 Subject: [PATCH 642/653] Bump answer tests --- tests/tests.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/tests.yaml b/tests/tests.yaml index 51d391ecb0a..8ae2b5e708e 100644 --- a/tests/tests.yaml +++ b/tests/tests.yaml @@ -12,7 +12,7 @@ answer_tests: - yt/frontends/amrvac/tests/test_outputs.py:test_riemann_cartesian_175D - yt/frontends/amrvac/tests/test_outputs.py:test_rmi_cartesian_dust_2D - local_arepo_006: + local_arepo_007: # PR 2909 - yt/frontends/arepo/tests/test_outputs.py:test_arepo_bullet - yt/frontends/arepo/tests/test_outputs.py:test_arepo_tng59 - yt/frontends/arepo/tests/test_outputs.py:test_index_override @@ -59,7 +59,7 @@ answer_tests: - yt/frontends/flash/tests/test_outputs.py:test_wind_tunnel - yt/frontends/flash/tests/test_outputs.py:test_fid_1to3_b1 - local_gadget_004: + local_gadget_005: # PR 2909 - yt/frontends/gadget/tests/test_outputs.py:test_iso_collapse - yt/frontends/gadget/tests/test_outputs.py:test_pid_uniqueness - yt/frontends/gadget/tests/test_outputs.py:test_bigendian_field_access @@ -73,7 +73,7 @@ answer_tests: 
local_gdf_001: - yt/frontends/gdf/tests/test_outputs.py:test_sedov_tunnel - local_gizmo_005: + local_gizmo_006: # PR 2909 - yt/frontends/gizmo/tests/test_outputs.py:test_gizmo_64 local_halos_009: @@ -82,7 +82,7 @@ answer_tests: - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g5 - yt/frontends/gadget_fof/tests/test_outputs.py:test_fields_g42 - local_owls_005: + local_owls_006: # PR 2909 - yt/frontends/owls/tests/test_outputs.py:test_snapshot_033 - yt/frontends/owls/tests/test_outputs.py:test_OWLS_particlefilter @@ -94,7 +94,7 @@ answer_tests: - yt/visualization/tests/test_particle_plot.py:test_particle_phase_answers - yt/visualization/tests/test_raw_field_slices.py:test_raw_field_slices - local_tipsy_006: + local_tipsy_007: # PR 2909 - yt/frontends/tipsy/tests/test_outputs.py:test_pkdgrav - yt/frontends/tipsy/tests/test_outputs.py:test_gasoline_dmonly - yt/frontends/tipsy/tests/test_outputs.py:test_tipsy_galaxy From 41ce3604d0b9271e8040f58b4aa4a1ec10f576c8 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Tue, 30 Jun 2020 11:25:36 +0200 Subject: [PATCH 643/653] tests: add test_units_overrirde.py --- yt/data_objects/tests/test_units_override.py | 70 ++++++++++++++++++++ 1 file changed, 70 insertions(+) create mode 100644 yt/data_objects/tests/test_units_override.py diff --git a/yt/data_objects/tests/test_units_override.py b/yt/data_objects/tests/test_units_override.py new file mode 100644 index 00000000000..d4da2560743 --- /dev/null +++ b/yt/data_objects/tests/test_units_override.py @@ -0,0 +1,70 @@ +from functools import partial + +from yt.data_objects.static_output import Dataset +from yt.testing import assert_raises +from yt.units import YTQuantity +from yt.units.unit_registry import UnitRegistry + +mock_quan = partial(YTQuantity, registry=UnitRegistry()) + + +def test_schema_validation(): + + valid_schemas = [ + {"length_unit": 1.0}, + {"length_unit": [1.0]}, + {"length_unit": (1.0,)}, + {"length_unit": int(1.0)}, + {"length_unit": (1.0, "m")}, + {"length_unit": [1.0, "m"]}, + {"length_unit": YTQuantity(1.0, "m")}, + ] + + for schema in valid_schemas: + uo = Dataset._sanitize_units_override(schema) + for k, v in uo.items(): + q = mock_quan(v) # check that no error (TypeError) is raised + q.to("pc") # check that q is a length + + +def test_invalid_schema_detection(): + invalid_key_schemas = [ + {"len_unit": 1.0}, # plain invalid key + {"lenght_unit": 1.0}, # typo + ] + for invalid_schema in invalid_key_schemas: + assert_raises(ValueError, Dataset._sanitize_units_override, invalid_schema) + + invalid_val_schemas = [ + {"length_unit": [1, 1, 1]}, # len(val) > 2 + {"length_unit": [1, 1, 1, 1, 1]}, # "data type not understood" in unyt + ] + + for invalid_schema in invalid_val_schemas: + assert_raises(TypeError, Dataset._sanitize_units_override, invalid_schema) + + # 0 shouldn't make sense + invalid_number_schemas = [ + {"length_unit": 0}, + {"length_unit": [0]}, + {"length_unit": (0,)}, + {"length_unit": (0, "cm")}, + ] + for invalid_schema in invalid_number_schemas: + assert_raises(ValueError, Dataset._sanitize_units_override, invalid_schema) + + +def test_typing_error_detection(): + invalid_schema = {"length_unit": "1m"} + + # this is the error that is raised by unyt on bad input + assert_raises(RuntimeError, mock_quan, invalid_schema["length_unit"]) + + # check that the sanitizer function is able to catch the + # type issue before passing down to unyt + assert_raises(TypeError, Dataset._sanitize_units_override, invalid_schema) + + +def 
test_dimensionality_error_detection(): + invalid_schema = {"length_unit": YTQuantity(1.0, "s")} + assert_raises(ValueError, Dataset._sanitize_units_override, invalid_schema) From 480b0dd664e8723358e3bf9797e74f23af2ffeaa Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 25 Jul 2020 08:23:12 +0200 Subject: [PATCH 644/653] refactor: add _validate_units_override_keys and _sanitize_units_override class methods to Dataset --- yt/data_objects/static_output.py | 95 +++++++++++++++++++++++++------- 1 file changed, 75 insertions(+), 20 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index f211606997d..dae72274e46 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -9,6 +9,7 @@ from stat import ST_CTIME import numpy as np +from unyt.exceptions import UnitConversionError, UnitParseError from yt.config import ytcfg from yt.data_objects.particle_filters import filter_registry @@ -215,9 +216,7 @@ def __init__( self.known_filters = self.known_filters or {} self.particle_unions = self.particle_unions or {} self.field_units = self.field_units or {} - if units_override is None: - units_override = {} - self.units_override = units_override + self.units_override = self.__class__._sanitize_units_override(units_override) # path stuff self.parameter_filename = str(filename) @@ -1116,6 +1115,8 @@ def _assign_unit_system(self, unit_system): self.unit_registry.unit_system = self.unit_system def _create_unit_registry(self, unit_system): + from yt.units import dimensions as dimensions + # yt assumes a CGS unit system by default (for back compat reasons). # Since unyt is MKS by default we specify the MKS values of the base # units in the CGS system. So, for length, 1 cm = .01 m. And so on. @@ -1243,29 +1244,83 @@ def set_code_units(self): "unitary", float(DW.max() * DW.units.base_value), DW.units.dimensions ) + @classmethod + def _validate_units_override_keys(cls, units_override): + valid_keys = set(cls.default_units.keys()) + invalid_keys_found = set(units_override.keys()) - valid_keys + if invalid_keys_found: + raise ValueError( + "units_override contains invalid keys: {}".format(invalid_keys_found) + ) + + default_units = { + "length_unit": "cm", + "time_unit": "s", + "mass_unit": "g", + "velocity_unit": "cm/s", + "magnetic_unit": "gauss", + "temperature_unit": "K", + } + + @classmethod + def _sanitize_units_override(cls, units_override): + """ + Convert units_override values to valid input types for unyt. + Throw meaningful errors early if units_override is ill-formed. + """ + uo = {} + if units_override is None: + return uo + + cls._validate_units_override_keys(units_override) + + for key in cls.default_units: + try: + val = units_override[key] + except KeyError: + continue + try: + uo[key] = YTQuantity(val) + continue + except RuntimeError: + pass + try: + uo[key] = YTQuantity(*val) + continue + except (RuntimeError, TypeError, UnitParseError): + pass + raise TypeError( + "units_override values should be 2-tuples (float, str), " + "YTQuantity objects or real numbers; " + "received {} with type {}.".format(val, type(val)) + ) + for key, q in uo.items(): + if q.units.is_dimensionless: + uo[key] = YTQuantity(q, cls.default_units[key]) + try: + uo[key].to(cls.default_units[key]) + except UnitConversionError: + raise ValueError( + "Inconsistent dimensionality in units_override. 
" + "Received {} = {}".format(key, uo[key]) + ) + if 1 / uo[key].value == np.inf: + raise ValueError( + "Invalid 0 normalisation factor in units_override for %s." % key + ) + return uo + def _override_code_units(self): - if len(self.units_override) == 0: + if not self.units_override: return + mylog.warning( "Overriding code units: Use this option only if you know that the " "dataset doesn't define the units correctly or at all." ) - for unit, cgs in [ - ("length", "cm"), - ("time", "s"), - ("mass", "g"), - ("velocity", "cm/s"), - ("magnetic", "gauss"), - ("temperature", "K"), - ]: - val = self.units_override.get(f"{unit}_unit", None) - if val is not None: - if isinstance(val, YTQuantity): - val = (val.v, str(val.units)) - elif not isinstance(val, tuple): - val = (val, cgs) - mylog.info("Overriding %s_unit: %g %s.", unit, val[0], val[1]) - setattr(self, f"{unit}_unit", self.quan(val[0], val[1])) + for ukey, val in self.units_override.items(): + mylog.info("Overriding %s: %s.", ukey, val) + setattr(self, ukey, self.quan(val)) _units = None _unit_system_id = None From 3ea91133427759052f7ac757955431b58cba3543 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 25 Jul 2020 10:43:12 +0200 Subject: [PATCH 645/653] fix: fix bug detected in the enzo tests by adapting the test framework to recent unyt version --- yt/testing.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/testing.py b/yt/testing.py index caced3e68e2..0ad6081c840 100644 --- a/yt/testing.py +++ b/yt/testing.py @@ -867,7 +867,7 @@ def units_override_check(fn): unit_attr = getattr(ds1, f"{u}_unit", None) if unit_attr is not None: attrs1.append(unit_attr) - units_override[f"{u}_unit"] = (unit_attr.v, str(unit_attr.units)) + units_override[f"{u}_unit"] = (unit_attr.v, unit_attr.units) del ds1 ds2 = load(fn, units_override=units_override) assert len(ds2.units_override) > 0 From 1e3bd3bf2cd4e201d1a758c65999dc8070f041c7 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 1 Jul 2020 10:29:15 +0200 Subject: [PATCH 646/653] fix: adapt amrvac frontend --- yt/frontends/amrvac/data_structures.py | 119 +++++++--------- yt/frontends/amrvac/tests/test_outputs.py | 131 +----------------- .../amrvac/tests/test_units_override.py | 125 +++++++++++++++++ 3 files changed, 177 insertions(+), 198 deletions(-) create mode 100644 yt/frontends/amrvac/tests/test_units_override.py diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 23650b45d68..8ec7522fd3e 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -4,8 +4,6 @@ """ - - import os import stat import struct @@ -24,14 +22,6 @@ from .datfile_utils import get_header, get_tree_info from .fields import AMRVACFieldInfo -ALLOWED_UNIT_COMBINATIONS = [ - {"numberdensity_unit", "temperature_unit", "length_unit"}, - {"mass_unit", "temperature_unit", "length_unit"}, - {"mass_unit", "time_unit", "length_unit"}, - {"numberdensity_unit", "velocity_unit", "length_unit"}, - {"mass_unit", "velocity_unit", "length_unit"}, -] - class AMRVACGrid(AMRGridPatch): """A class to populate AMRVACHierarchy.grids, setting parent/children relations.""" @@ -86,23 +76,17 @@ def __init__(self, ds, dataset_type="amrvac"): super(AMRVACHierarchy, self).__init__(ds, dataset_type) def _detect_output_fields(self): - """ - Parse field names from datfile header, as stored in self.dataset.parameters - - """ - # required method + """Parse field names from the header, as stored in 
self.dataset.parameters""" self.field_list = [ (self.dataset_type, f) for f in self.dataset.parameters["w_names"] ] def _count_grids(self): """Set self.num_grids from datfile header.""" - # required method self.num_grids = self.dataset.parameters["nleafs"] def _parse_index(self): """Populate self.grid_* attributes from tree info from datfile header.""" - # required method with open(self.index_filename, "rb") as istream: vaclevels, morton_indices, block_offsets = get_tree_info(istream) assert ( @@ -243,7 +227,6 @@ def __init__( @classmethod def _is_valid(self, *args, **kwargs): """At load time, check whether data is recognized as AMRVAC formatted.""" - # required class method validation = False if args[0].endswith(".dat"): try: @@ -278,7 +261,7 @@ def _parse_geometry(self, geometry_tag): Returns ------- geometry_yt : str - Lower case geometry tag "cartesian", "polar", "cylindrical" or "spherical" + Lower case geometry tag (cartesian", "polar", "cylindrical" or "spherical") Examples -------- @@ -384,17 +367,9 @@ def _parse_parameter_file(self): # units stuff ====================================================================== def _set_code_unit_attributes(self): """Reproduce how AMRVAC internally set up physical normalisation factors.""" - # required method - # devnote: this method is never defined in the parent abstract class Dataset - # but it is called in Dataset.set_code_units(), which is part of - # Dataset.__init__() so it must be defined here. - - # devnote: this gets called later than Dataset._override_code_units() + # This gets called later than Dataset._override_code_units() # This is the reason why it uses setdefaultattr: it will only fill in the gaps # left by the "override", instead of overriding them again. - # For the same reason, self.units_override is set, as well as corresponding - # *_unit instance attributes which may include up to 3 of the following items: - # length, time, mass, velocity, number_density, temperature # note: yt sets hydrogen mass equal to proton mass, amrvac doesn't. mp_cgs = self.quan(1.672621898e-24, "g") # This value is taken from AstroPy @@ -408,23 +383,17 @@ def _set_code_unit_attributes(self): # in this case unit_mass is supplied (and has been set as attribute) mass_unit = self.mass_unit density_unit = mass_unit / length_unit ** 3 - numberdensity_unit = density_unit / ((1.0 + 4.0 * He_abundance) * mp_cgs) + nd_unit = density_unit / ((1.0 + 4.0 * He_abundance) * mp_cgs) else: # other case: numberdensity is supplied. # Fall back to one (default) if no overrides supplied - numberdensity_override = self.units_override.get( - "numberdensity_unit", (1, "cm**-3") - ) - if ( - "numberdensity_unit" in self.units_override - ): # print similar warning as yt when overriding numberdensity - mylog.info( - "Overriding numberdensity_unit: %g %s.", *numberdensity_override + try: + nd_unit = self.quan(self.units_override["numberdensity_unit"]) + except KeyError: + nd_unit = self.quan( + 1.0, self.__class__.default_units["numberdensity_unit"] ) - numberdensity_unit = self.quan( - *numberdensity_override - ) # numberdensity is never set as attribute - density_unit = (1.0 + 4.0 * He_abundance) * mp_cgs * numberdensity_unit + density_unit = (1.0 + 4.0 * He_abundance) * mp_cgs * nd_unit mass_unit = density_unit * length_unit ** 3 # 2. 
calculations for velocity @@ -442,18 +411,14 @@ def _set_code_unit_attributes(self): # Fall back to one (default) if not temperature_unit = getattr(self, "temperature_unit", self.quan(1, "K")) pressure_unit = ( - (2.0 + 3.0 * He_abundance) - * numberdensity_unit - * kb_cgs - * temperature_unit + (2.0 + 3.0 * He_abundance) * nd_unit * kb_cgs * temperature_unit ).in_cgs() velocity_unit = (np.sqrt(pressure_unit / density_unit)).in_cgs() else: # velocity is not zero if either time was given OR velocity was given pressure_unit = (density_unit * velocity_unit ** 2).in_cgs() temperature_unit = ( - pressure_unit - / ((2.0 + 3.0 * He_abundance) * numberdensity_unit * kb_cgs) + pressure_unit / ((2.0 + 3.0 * He_abundance) * nd_unit * kb_cgs) ).in_cgs() # 4. calculations for magnetic unit and time @@ -464,7 +429,6 @@ def _set_code_unit_attributes(self): setdefaultattr(self, "mass_unit", mass_unit) setdefaultattr(self, "density_unit", density_unit) - setdefaultattr(self, "numberdensity_unit", numberdensity_unit) setdefaultattr(self, "length_unit", length_unit) setdefaultattr(self, "velocity_unit", velocity_unit) @@ -474,48 +438,61 @@ def _set_code_unit_attributes(self): setdefaultattr(self, "pressure_unit", pressure_unit) setdefaultattr(self, "magnetic_unit", magnetic_unit) - def _override_code_units(self): - """Add a check step to the base class' method (Dataset).""" - self._check_override_consistency() - super(AMRVACDataset, self)._override_code_units() - - def _check_override_consistency(self): - """Check that keys in units_override are consistent with respect to AMRVAC's - internal way to set up normalisations factors. + allowed_unit_combinations = [ + {"numberdensity_unit", "temperature_unit", "length_unit"}, + {"mass_unit", "temperature_unit", "length_unit"}, + {"mass_unit", "time_unit", "length_unit"}, + {"numberdensity_unit", "velocity_unit", "length_unit"}, + {"mass_unit", "velocity_unit", "length_unit"}, + ] + + default_units = { + "length_unit": "cm", + "time_unit": "s", + "mass_unit": "g", + "velocity_unit": "cm/s", + "magnetic_unit": "gauss", + "temperature_unit": "K", + # this is the one difference with Dataset.default_units: + # we accept numberdensity_unit as a valid override + "numberdensity_unit": "cm**-3", + } + @classmethod + def _validate_units_override_keys(cls, units_override): + """Check that keys in units_override are consistent with AMRVAC's internal + normalisations factors. """ - # frontend specific method # YT supports overriding other normalisations, this method ensures consistency # between supplied 'units_override' items and those used by AMRVAC. # AMRVAC's normalisations/units have 3 degrees of freedom. # Moreover, if temperature unit is specified then velocity unit will be # calculated accordingly, and vice-versa. - # We replicate this by allowing a finite set of combinations in units_override - if not self.units_override: - return - overrides = set(self.units_override) + # We replicate this by allowing a finite set of combinations. 
# there are only three degrees of freedom, so explicitly check for this - if len(overrides) > 3: + if len(units_override) > 3: raise ValueError( "More than 3 degrees of freedom were specified " - "in units_override ({} given)".format(len(overrides)) + "in units_override ({} given)".format(len(units_override)) ) # temperature and velocity cannot both be specified - if "temperature_unit" in overrides and "velocity_unit" in overrides: + if "temperature_unit" in units_override and "velocity_unit" in units_override: raise ValueError( "Either temperature or velocity is allowed in units_override, not both." ) # check if provided overrides are allowed - for allowed_combo in ALLOWED_UNIT_COMBINATIONS: - if overrides.issubset(allowed_combo): + suo = set(units_override) + for allowed_combo in cls.allowed_unit_combinations: + if suo.issubset(allowed_combo): break else: raise ValueError( - "Combination {} passed to units_override " - "is not consistent with AMRVAC. \n" - "Allowed combinations are {}".format( - overrides, ALLOWED_UNIT_COMBINATIONS - ) + f"Combination {suo} passed to units_override is not consistent with " + "AMRVAC.\n" + f"Allowed combinations are {cls.allowed_unit_combinations}" ) + + # syntax for mixing super with classmethod is weird... + super(cls, cls)._validate_units_override_keys(units_override) diff --git a/yt/frontends/amrvac/tests/test_outputs.py b/yt/frontends/amrvac/tests/test_outputs.py index 0aae58e6bb9..0e7f89747d8 100644 --- a/yt/frontends/amrvac/tests/test_outputs.py +++ b/yt/frontends/amrvac/tests/test_outputs.py @@ -2,8 +2,8 @@ import yt # NOQA from yt.frontends.amrvac.api import AMRVACDataset, AMRVACGrid -from yt.testing import assert_allclose_units, assert_raises, requires_file -from yt.units import YTQuantity +from yt.testing import requires_file +from yt.units import YTArray from yt.utilities.answer_testing.framework import ( data_dir_load, requires_ds, @@ -59,8 +59,8 @@ def test_grid_attributes(): assert ds.index.max_level == 2 for g in grids: assert isinstance(g, AMRVACGrid) - assert isinstance(g.LeftEdge, yt.units.yt_array.YTArray) - assert isinstance(g.RightEdge, yt.units.yt_array.YTArray) + assert isinstance(g.LeftEdge, YTArray) + assert isinstance(g.RightEdge, YTArray) assert isinstance(g.ActiveDimensions, np.ndarray) assert isinstance(g.Level, (np.int32, np.int64, int)) @@ -136,126 +136,3 @@ def test_rmi_cartesian_dust_2D(): for test in small_patch_amr(ds, _get_fields_to_check(ds)): test_rmi_cartesian_dust_2D.__name__ = test.description yield test - - -# Tests for units: verify that overriding certain units yields the correct derived units -# The following are correct normalisations based on length, numberdensity and temp -length_unit = (1e9, "cm") -numberdensity_unit = (1e9, "cm**-3") -temperature_unit = (1e6, "K") -density_unit = (2.341670657200000e-15, "g*cm**-3") -mass_unit = (2.341670657200000e12, "g") -velocity_unit = (1.164508387441102e07, "cm*s**-1") -pressure_unit = (3.175492240000000e-01, "dyn*cm**-2") -time_unit = (8.587314705370271e01, "s") -magnetic_unit = (1.997608879907716, "gauss") - - -def _assert_normalisations_equal(ds): - assert_allclose_units(ds.length_unit, YTQuantity(*length_unit)) - assert_allclose_units(ds.numberdensity_unit, YTQuantity(*numberdensity_unit)) - assert_allclose_units(ds.temperature_unit, YTQuantity(*temperature_unit)) - assert_allclose_units(ds.density_unit, YTQuantity(*density_unit)) - assert_allclose_units(ds.mass_unit, YTQuantity(*mass_unit)) - assert_allclose_units(ds.velocity_unit, YTQuantity(*velocity_unit)) - 
assert_allclose_units(ds.pressure_unit, YTQuantity(*pressure_unit)) - assert_allclose_units(ds.time_unit, YTQuantity(*time_unit)) - assert_allclose_units(ds.magnetic_unit, YTQuantity(*magnetic_unit)) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_length_temp_nb(): - # overriding length, temperature, numberdensity - overrides = dict( - length_unit=length_unit, - temperature_unit=temperature_unit, - numberdensity_unit=numberdensity_unit, - ) - ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) - _assert_normalisations_equal(ds) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_length_temp_mass(): - # overriding length, temperature, mass - overrides = dict( - length_unit=length_unit, temperature_unit=temperature_unit, mass_unit=mass_unit - ) - ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) - _assert_normalisations_equal(ds) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_length_time_mass(): - # overriding length, time, mass - overrides = dict(length_unit=length_unit, time_unit=time_unit, mass_unit=mass_unit) - ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) - _assert_normalisations_equal(ds) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_length_vel_nb(): - # overriding length, velocity, numberdensity - overrides = dict( - length_unit=length_unit, - velocity_unit=velocity_unit, - numberdensity_unit=numberdensity_unit, - ) - ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) - _assert_normalisations_equal(ds) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_length_vel_mass(): - # overriding length, velocity, mass - overrides = dict( - length_unit=length_unit, velocity_unit=velocity_unit, mass_unit=mass_unit - ) - ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) - _assert_normalisations_equal(ds) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_default(): - # test default normalisations, without overrides - ds = data_dir_load(khi_cartesian_2D) - assert_allclose_units(ds.length_unit, YTQuantity(1, "cm")) - assert_allclose_units(ds.numberdensity_unit, YTQuantity(1, "cm**-3")) - assert_allclose_units(ds.temperature_unit, YTQuantity(1, "K")) - assert_allclose_units( - ds.density_unit, YTQuantity(2.341670657200000e-24, "g*cm**-3") - ) - assert_allclose_units(ds.mass_unit, YTQuantity(2.341670657200000e-24, "g")) - assert_allclose_units( - ds.velocity_unit, YTQuantity(1.164508387441102e04, "cm*s**-1") - ) - assert_allclose_units( - ds.pressure_unit, YTQuantity(3.175492240000000e-16, "dyn*cm**-2") - ) - assert_allclose_units(ds.time_unit, YTQuantity(8.587314705370271e-05, "s")) - assert_allclose_units(ds.magnetic_unit, YTQuantity(6.316993934686148e-08, "gauss")) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_too_many_args(): - # test forbidden case: too many arguments (max 3 are allowed) - overrides = dict( - length_unit=length_unit, - numberdensity_unit=numberdensity_unit, - temperature_unit=temperature_unit, - time_unit=time_unit, - ) - with assert_raises(ValueError): - data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) - - -@requires_file(khi_cartesian_2D) -def test_normalisations_vel_and_length(): - # test forbidden case: both velocity and temperature are specified as overrides - overrides = dict( - length_unit=length_unit, - velocity_unit=velocity_unit, - temperature_unit=temperature_unit, - ) - with assert_raises(ValueError): - 
data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) diff --git a/yt/frontends/amrvac/tests/test_units_override.py b/yt/frontends/amrvac/tests/test_units_override.py new file mode 100644 index 00000000000..12f7314c776 --- /dev/null +++ b/yt/frontends/amrvac/tests/test_units_override.py @@ -0,0 +1,125 @@ +from yt.testing import assert_allclose_units, assert_raises, requires_file +from yt.units import YTQuantity +from yt.utilities.answer_testing.framework import data_dir_load + +khi_cartesian_2D = "amrvac/kh_2d0000.dat" + +# Tests for units: verify that overriding certain units yields the correct derived units. +# The following are correct normalisations based on length, numberdensity and temperature +length_unit = (1e9, "cm") +numberdensity_unit = (1e9, "cm**-3") +temperature_unit = (1e6, "K") +density_unit = (2.341670657200000e-15, "g*cm**-3") +mass_unit = (2.341670657200000e12, "g") +velocity_unit = (1.164508387441102e07, "cm*s**-1") +pressure_unit = (3.175492240000000e-01, "dyn*cm**-2") +time_unit = (8.587314705370271e01, "s") +magnetic_unit = (1.997608879907716, "gauss") + + +def _assert_normalisations_equal(ds): + assert_allclose_units(ds.length_unit, YTQuantity(*length_unit)) + assert_allclose_units(ds.temperature_unit, YTQuantity(*temperature_unit)) + assert_allclose_units(ds.density_unit, YTQuantity(*density_unit)) + assert_allclose_units(ds.mass_unit, YTQuantity(*mass_unit)) + assert_allclose_units(ds.velocity_unit, YTQuantity(*velocity_unit)) + assert_allclose_units(ds.pressure_unit, YTQuantity(*pressure_unit)) + assert_allclose_units(ds.time_unit, YTQuantity(*time_unit)) + assert_allclose_units(ds.magnetic_unit, YTQuantity(*magnetic_unit)) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_length_temp_nb(): + # overriding length, temperature, numberdensity + overrides = dict( + length_unit=length_unit, + temperature_unit=temperature_unit, + numberdensity_unit=numberdensity_unit, + ) + ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) + _assert_normalisations_equal(ds) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_length_temp_mass(): + # overriding length, temperature, mass + overrides = dict( + length_unit=length_unit, temperature_unit=temperature_unit, mass_unit=mass_unit + ) + ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) + _assert_normalisations_equal(ds) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_length_time_mass(): + # overriding length, time, mass + overrides = dict(length_unit=length_unit, time_unit=time_unit, mass_unit=mass_unit) + ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) + _assert_normalisations_equal(ds) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_length_vel_nb(): + # overriding length, velocity, numberdensity + overrides = dict( + length_unit=length_unit, + velocity_unit=velocity_unit, + numberdensity_unit=numberdensity_unit, + ) + ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) + _assert_normalisations_equal(ds) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_length_vel_mass(): + # overriding length, velocity, mass + overrides = dict( + length_unit=length_unit, velocity_unit=velocity_unit, mass_unit=mass_unit + ) + ds = data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) + _assert_normalisations_equal(ds) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_default(): + # test default normalisations, without overrides + ds = 
data_dir_load(khi_cartesian_2D) + assert_allclose_units(ds.length_unit, YTQuantity(1, "cm")) + assert_allclose_units(ds.temperature_unit, YTQuantity(1, "K")) + assert_allclose_units( + ds.density_unit, YTQuantity(2.341670657200000e-24, "g*cm**-3") + ) + assert_allclose_units(ds.mass_unit, YTQuantity(2.341670657200000e-24, "g")) + assert_allclose_units( + ds.velocity_unit, YTQuantity(1.164508387441102e04, "cm*s**-1") + ) + assert_allclose_units( + ds.pressure_unit, YTQuantity(3.175492240000000e-16, "dyn*cm**-2") + ) + assert_allclose_units(ds.time_unit, YTQuantity(8.587314705370271e-05, "s")) + assert_allclose_units(ds.magnetic_unit, YTQuantity(6.316993934686148e-08, "gauss")) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_too_many_args(): + # test forbidden case: too many arguments (max 3 are allowed) + overrides = dict( + length_unit=length_unit, + numberdensity_unit=numberdensity_unit, + temperature_unit=temperature_unit, + time_unit=time_unit, + ) + with assert_raises(ValueError): + data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) + + +@requires_file(khi_cartesian_2D) +def test_normalisations_vel_and_length(): + # test forbidden case: both velocity and temperature are specified as overrides + overrides = dict( + length_unit=length_unit, + velocity_unit=velocity_unit, + temperature_unit=temperature_unit, + ) + with assert_raises(ValueError): + data_dir_load(khi_cartesian_2D, kwargs={"units_override": overrides}) From fda0c11d66fae5ddaaa5028430bb614c4c13f86b Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 08:00:22 +0200 Subject: [PATCH 647/653] flynting --- yt/data_objects/static_output.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index dae72274e46..e4a66ed5de3 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1250,7 +1250,7 @@ def _validate_units_override_keys(cls, units_override): invalid_keys_found = set(units_override.keys()) - valid_keys if invalid_keys_found: raise ValueError( - "units_override contains invalid keys: {}".format(invalid_keys_found) + f"units_override contains invalid keys: {invalid_keys_found}" ) default_units = { @@ -1306,7 +1306,7 @@ def _sanitize_units_override(cls, units_override): ) if 1 / uo[key].value == np.inf: raise ValueError( - "Invalid 0 normalisation factor in units_override for %s." % key + f"Invalid 0 normalisation factor in units_override for {key}." 
) return uo From 18fc10efd002f85ed64884d1059ad6db285e9a5a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Wed, 12 Aug 2020 08:50:52 +0200 Subject: [PATCH 648/653] fix a B007 error --- yt/data_objects/tests/test_units_override.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/data_objects/tests/test_units_override.py b/yt/data_objects/tests/test_units_override.py index d4da2560743..c874a4ba433 100644 --- a/yt/data_objects/tests/test_units_override.py +++ b/yt/data_objects/tests/test_units_override.py @@ -22,7 +22,7 @@ def test_schema_validation(): for schema in valid_schemas: uo = Dataset._sanitize_units_override(schema) - for k, v in uo.items(): + for v in uo.values(): q = mock_quan(v) # check that no error (TypeError) is raised q.to("pc") # check that q is a length From 4e7a61f4f44309468b75bd7820936b8e8142fe1e Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Sat, 15 Aug 2020 17:35:19 +0200 Subject: [PATCH 649/653] fix syntax error and line too long --- yt/frontends/amrvac/data_structures.py | 6 +++--- yt/frontends/amrvac/tests/test_units_override.py | 5 +++-- 2 files changed, 6 insertions(+), 5 deletions(-) diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 8ec7522fd3e..86da4ca4733 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -489,9 +489,9 @@ def _validate_units_override_keys(cls, units_override): break else: raise ValueError( - f"Combination {suo} passed to units_override is not consistent with " - "AMRVAC.\n" - f"Allowed combinations are {cls.allowed_unit_combinations}" + f"Combination {suo} passed to units_override " + "is not consistent with AMRVAC.\n" + "Allowed combinations are {cls.allowed_unit_combinations}" ) # syntax for mixing super with classmethod is weird... diff --git a/yt/frontends/amrvac/tests/test_units_override.py b/yt/frontends/amrvac/tests/test_units_override.py index 12f7314c776..3d3074cdadb 100644 --- a/yt/frontends/amrvac/tests/test_units_override.py +++ b/yt/frontends/amrvac/tests/test_units_override.py @@ -4,8 +4,9 @@ khi_cartesian_2D = "amrvac/kh_2d0000.dat" -# Tests for units: verify that overriding certain units yields the correct derived units. -# The following are correct normalisations based on length, numberdensity and temperature +# Tests for units: check that overriding certain units yields the correct derived units. +# The following are the correct normalisations +# based on length, numberdensity and temperature length_unit = (1e9, "cm") numberdensity_unit = (1e9, "cm**-3") temperature_unit = (1e6, "K") From f3575c42b25b892e3989df9bdb56162f94b49f93 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 17 Sep 2020 06:57:14 +0200 Subject: [PATCH 650/653] fix a broken string --- yt/frontends/amrvac/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 86da4ca4733..7880ccfc754 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -491,7 +491,7 @@ def _validate_units_override_keys(cls, units_override): raise ValueError( f"Combination {suo} passed to units_override " "is not consistent with AMRVAC.\n" - "Allowed combinations are {cls.allowed_unit_combinations}" + f"Allowed combinations are {cls.allowed_unit_combinations}" ) # syntax for mixing super with classmethod is weird... 
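
For reference, the sanitizer these patches converge on accepts the same value forms exercised in
yt/data_objects/tests/test_units_override.py: a plain number, a 2-sequence of (number, unit string),
or a YTQuantity. A minimal user-facing sketch follows; the dataset path and the particular override
values are hypothetical, chosen only to illustrate the accepted forms:

>>> import yt
>>> from yt.units import YTQuantity
>>> overrides = {
...     "length_unit": (100.0, "au"),          # 2-sequence (number, str)
...     "mass_unit": YTQuantity(1.0, "Msun"),  # unyt quantity
...     "time_unit": 1.0,                      # bare number, read in the default unit ("s")
... }
>>> ds = yt.load("output_00080/info_00080.txt", units_override=overrides)

Each value is promoted to a YTQuantity and dimension-checked when the dataset is instantiated, so a
malformed override fails early with a TypeError or ValueError instead of surfacing later during unit
setup.
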
From 97a4c1f5b8a0bfcc9f89e06a39ad9ba1cc44adfc Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 17 Sep 2020 06:58:16 +0200 Subject: [PATCH 651/653] fstringify --- yt/frontends/amrvac/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 7880ccfc754..474a8f37d58 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -475,7 +475,7 @@ def _validate_units_override_keys(cls, units_override): if len(units_override) > 3: raise ValueError( "More than 3 degrees of freedom were specified " - "in units_override ({} given)".format(len(units_override)) + f"in units_override ({len(units_override)} given)" ) # temperature and velocity cannot both be specified if "temperature_unit" in units_override and "velocity_unit" in units_override: From 68984763268af7c93b17aa86c5c3b3f7f18156da Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 17 Sep 2020 20:56:38 +0200 Subject: [PATCH 652/653] typo Co-authored-by: Madicken Munk --- yt/frontends/amrvac/data_structures.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/yt/frontends/amrvac/data_structures.py b/yt/frontends/amrvac/data_structures.py index 474a8f37d58..3934d937174 100644 --- a/yt/frontends/amrvac/data_structures.py +++ b/yt/frontends/amrvac/data_structures.py @@ -261,7 +261,7 @@ def _parse_geometry(self, geometry_tag): Returns ------- geometry_yt : str - Lower case geometry tag (cartesian", "polar", "cylindrical" or "spherical") + Lower case geometry tag ("cartesian", "polar", "cylindrical" or "spherical") Examples -------- From a341f822c4e97cfd4a259a986262221773398769 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Cl=C3=A9ment=20Robert?= Date: Thu, 17 Sep 2020 23:48:36 +0200 Subject: [PATCH 653/653] doc: complete docstring + inline comments --- yt/data_objects/static_output.py | 42 +++++++++++++++++++++++++++----- 1 file changed, 36 insertions(+), 6 deletions(-) diff --git a/yt/data_objects/static_output.py b/yt/data_objects/static_output.py index e4a66ed5de3..78cf39ff25f 100644 --- a/yt/data_objects/static_output.py +++ b/yt/data_objects/static_output.py @@ -1115,7 +1115,7 @@ def _assign_unit_system(self, unit_system): self.unit_registry.unit_system = self.unit_system def _create_unit_registry(self, unit_system): - from yt.units import dimensions as dimensions + from yt.units import dimensions # yt assumes a CGS unit system by default (for back compat reasons). # Since unyt is MKS by default we specify the MKS values of the base @@ -1267,6 +1267,31 @@ def _sanitize_units_override(cls, units_override): """ Convert units_override values to valid input types for unyt. Throw meaningful errors early if units_override is ill-formed. + + Parameters + ---------- + units_override : dict + + keys should be strings with format "_unit" (e.g. "mass_unit"), and + need to match a key in cls.default_units + + values should be mappable to unyt.unyt_quantity objects, and can be any + combinations of: + - unyt.unyt_quantity + - 2-long sequence (tuples, list, ...) with types (number, str) + e.g. (10, "km"), (0.1, "s") + - number (in which case the associated is taken from cls.default_unit) + + + Raises + ------ + TypeError + If unit_override has invalid types + + ValueError + If provided units do not match the intended dimensionality, + or in case of a zero scaling factor. 
+ """ uo = {} if units_override is None: @@ -1279,31 +1304,36 @@ def _sanitize_units_override(cls, units_override): val = units_override[key] except KeyError: continue + + # Now attempt to instanciate a unyt.unyt_quantity from val ... try: + # ... directly (valid if val is a number, or a unyt_quantity) uo[key] = YTQuantity(val) continue except RuntimeError: + # note that unyt.unyt_quantity throws RuntimeError in lieu of TypeError pass try: + # ... with tuple unpacking (valid if val is a sequence) uo[key] = YTQuantity(*val) continue except (RuntimeError, TypeError, UnitParseError): pass raise TypeError( - "units_override values should be 2-tuples (float, str), " + "units_override values should be 2-sequence (float, str), " "YTQuantity objects or real numbers; " - "received {} with type {}.".format(val, type(val)) + f"received {val} with type {type(val)}." ) for key, q in uo.items(): if q.units.is_dimensionless: uo[key] = YTQuantity(q, cls.default_units[key]) try: uo[key].to(cls.default_units[key]) - except UnitConversionError: + except UnitConversionError as err: raise ValueError( "Inconsistent dimensionality in units_override. " - "Received {} = {}".format(key, uo[key]) - ) + f"Received {key} = {uo[key]}" + ) from err if 1 / uo[key].value == np.inf: raise ValueError( f"Invalid 0 normalisation factor in units_override for {key}."
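
To complement the Raises section documented above, a short sketch of the failure modes the sanitizer
is meant to reject early; these follow the cases covered in
yt/data_objects/tests/test_units_override.py, with illustrative values:

>>> from yt.data_objects.static_output import Dataset
>>> Dataset._sanitize_units_override({"length_unit": "1m"})         # raises TypeError: strings are not accepted
>>> Dataset._sanitize_units_override({"length_unit": (1.0, "kg")})  # raises ValueError: inconsistent dimensionality
>>> Dataset._sanitize_units_override({"length_unit": 0})            # raises ValueError: zero normalisation factor
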