[black-command] fixes
yt-fido committed Sep 18, 2020
1 parent 9b79f01 commit 8904cb1
Showing 45 changed files with 385 additions and 238 deletions.
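
These hunks are consistent with the black 20.8b series, whose new "magic trailing comma" keeps one element per line in any bracketed collection that already ends with a comma, and which also normalizes docstring whitespace. A minimal sketch of the trailing-comma behavior, assuming black >= 20.8b0 is installed (the input line is taken from the multiplot_phaseplot.py hunk below):

import black

compact = (
    'p = yt.PhasePlot(ad, "density", "temperature",'
    ' ["cell_mass",], weight_field=None)\n'
)
# The trailing comma inside ["cell_mass",] makes black explode the call
# onto one line per argument, matching the diffs in this commit.
print(black.format_str(compact, mode=black.FileMode()))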
15 changes: 11 additions & 4 deletions conftest.py
@@ -30,16 +30,23 @@ def pytest_addoption(parser):
     Lets options be passed to test functions.
     """
     parser.addoption(
-        "--with-answer-testing", action="store_true", default=False,
+        "--with-answer-testing",
+        action="store_true",
+        default=False,
     )
     parser.addoption(
-        "--answer-store", action="store_true", default=False,
+        "--answer-store",
+        action="store_true",
+        default=False,
     )
     parser.addoption(
-        "--answer-big-data", action="store_true", default=False,
+        "--answer-big-data",
+        action="store_true",
+        default=False,
     )
     parser.addoption(
-        "--save-answer-arrays", action="store_true",
+        "--save-answer-arrays",
+        action="store_true",
     )


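For context, options registered through pytest_addoption are read back in tests and fixtures via request.config.getoption. A hypothetical sketch (the fixture below is illustrative, not part of this commit):

import pytest

@pytest.fixture
def answer_store(request):
    # Value parsed from the command line; False unless --answer-store is given
    return request.config.getoption("--answer-store")
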
10 changes: 9 additions & 1 deletion doc/source/cookbook/multiplot_phaseplot.py
@@ -23,7 +23,15 @@
 # Load the data and create a single plot
 ds = yt.load("enzo_tiny_cosmology/DD00%2d/DD00%2d" % (SnapNum, SnapNum))
 ad = ds.all_data()
-p = yt.PhasePlot(ad, "density", "temperature", ["cell_mass",], weight_field=None)
+p = yt.PhasePlot(
+    ad,
+    "density",
+    "temperature",
+    [
+        "cell_mass",
+    ],
+    weight_field=None,
+)

 # Ensure the axes and colorbar limits match for all plots
 p.set_xlim(1.0e-32, 8.0e-26)
6 changes: 5 additions & 1 deletion doc/source/cookbook/particle_filter_sfr.py
@@ -23,7 +23,11 @@ def formed_star(pfilter, data):

 time_range = [0, 5e8]  # years
 n_bins = 1000
-hist, bins = np.histogram(formation_time, bins=n_bins, range=time_range,)
+hist, bins = np.histogram(
+    formation_time,
+    bins=n_bins,
+    range=time_range,
+)
 inds = np.digitize(formation_time, bins=bins)
 time = (bins[:-1] + bins[1:]) / 2

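The reformatted call is part of a standard binning idiom: histogram the formation times, then take the midpoints of the returned bin edges as the time axis. A self-contained sketch with made-up data:

import numpy as np

formation_time = np.random.uniform(0, 5e8, size=10000)  # fake times, in years
hist, bins = np.histogram(formation_time, bins=1000, range=[0, 5e8])
time = (bins[:-1] + bins[1:]) / 2  # midpoint of each bin
assert time.size == hist.size  # np.histogram returns n_bins + 1 edges
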
4 changes: 3 additions & 1 deletion setup.py
@@ -112,7 +112,9 @@
     ],
     keywords="astronomy astrophysics visualization " + "amr adaptivemeshrefinement",
     entry_points={
-        "console_scripts": ["yt = yt.utilities.command_line:run_main",],
+        "console_scripts": [
+            "yt = yt.utilities.command_line:run_main",
+        ],
         "nose.plugins.0.10": [
             "answer-testing = yt.utilities.answer_testing.framework:AnswerTesting"
         ],
6 changes: 5 additions & 1 deletion yt/data_objects/construction_data_containers.py
@@ -685,7 +685,11 @@ def to_xarray(self, fields=None):
         coords = {}
         for f in fields or self.field_data.keys():
             data[f] = {
-                "dims": ("x", "y", "z",),
+                "dims": (
+                    "x",
+                    "y",
+                    "z",
+                ),
                 "data": self[f],
                 "attrs": {"units": str(self[f].uq)},
             }
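The {"dims": ..., "data": ..., "attrs": ...} mapping built here matches the per-variable dictionary format consumed by xarray.Dataset.from_dict. A hedged sketch with dummy values (the field name and units are illustrative):

import numpy as np
import xarray as xr

data = {
    "density": {
        "dims": ("x", "y", "z"),
        "data": np.ones((2, 3, 4)),
        "attrs": {"units": "g/cm**3"},
    }
}
ds = xr.Dataset.from_dict(data)  # one Dataset variable per dictionary entry
print(ds["density"].attrs["units"])
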
136 changes: 68 additions & 68 deletions yt/data_objects/data_containers.py
@@ -713,74 +713,74 @@ def create_firefly_object(
         dataset_name="yt",
     ):
         r"""This function links a region of data stored in a yt dataset
         to the Python frontend API for [Firefly](github.com/ageller/Firefly),
         a browser-based particle visualization platform.

         Parameters
         ----------
         path_to_firefly : string
             The (ideally) absolute path to the directory containing the
             index.html file of Firefly.
         fields_to_include : array_like of strings
             A list of fields that you want to include in your
             Firefly visualization for on-the-fly filtering and
             colormapping.
         default_decimation_factor : integer
             The factor by which you want to decimate each particle group
             (e.g. if there are 1e7 total particles in your simulation
             you might want to set this to 100 at first). Randomly samples
             your data like `shuffled_data[::decimation_factor]` so as to
             not overtax a system. This is adjustable on a per particle group
             basis by changing the returned reader's
             `reader.particleGroup[i].decimation_factor` before calling
             `reader.dumpToJSON()`.
         velocity_units : string
             The units that the velocity should be converted to in order to
             show streamlines in Firefly. Defaults to km/s.
         coordinate_units : string
             The units that the coordinates should be converted to.
             Defaults to kpc.
         show_unused_fields : boolean
             A flag to optionally print the fields that are available in the
             dataset but were not explicitly requested to be tracked.
         dataset_name : string
             The name of the subdirectory the JSON files will be stored in
             (and the name that will appear in startup.json and in the
             dropdown menu at startup), e.g. `yt` -> JSON files will appear
             in `Firefly/data/yt`.

         Returns
         -------
         reader : firefly_api.reader.Reader object
             A reader object from the firefly_api, configured to output
             the selected region's data.

         Examples
         --------
         >>> ramses_ds = yt.load(
         ...     "/Users/agurvich/Desktop/yt_workshop/"
         ...     + "DICEGalaxyDisk_nonCosmological/output_00002/info_00002.txt"
         ... )
         >>> region = ramses_ds.sphere(ramses_ds.domain_center, (1000, "kpc"))
         >>> reader = region.create_firefly_object(
         ...     path_to_firefly="/Users/agurvich/research/repos/Firefly",
         ...     fields_to_include=[
         ...         "particle_extra_field_1",
         ...         "particle_extra_field_2",
         ...     ],
         ...     fields_units=["dimensionless", "dimensionless"],
         ...     dataset_name="IsoGalaxyRamses",
         ... )
         >>> reader.options["color"]["io"] = [1, 1, 0, 1]
         >>> reader.particleGroups[0].decimation_factor = 100
         >>> reader.dumpToJSON()
         """

         ## attempt to import firefly_api
3 changes: 2 additions & 1 deletion yt/data_objects/static_output.py
@@ -1505,7 +1505,8 @@ def add_field(self, name, function, sampling_type, **kwargs):
         # Handle the case where the field has already been added.
         if not override and name in self.field_info:
             mylog.warning(
-                "Field %s already exists. To override use `force_override=True`.", name,
+                "Field %s already exists. To override use `force_override=True`.",
+                name,
             )

         self.field_info.add_field(name, function, sampling_type, **kwargs)
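The message and the field name stay separate arguments rather than being %-interpolated at the call site: the logging machinery formats "%s" lazily, only if the record is actually emitted. A standalone sketch of the pattern (the logger name is illustrative, not yt's own):

import logging

logging.basicConfig(level=logging.WARNING)
mylog = logging.getLogger("demo")
name = ("gas", "density")
# "%s" is substituted only when the WARNING record is actually emitted
mylog.warning("Field %s already exists. To override use `force_override=True`.", name)
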
4 changes: 3 additions & 1 deletion yt/fields/field_functions.py
@@ -20,7 +20,9 @@ def get_radius(data, field_prefix, ftype):
         # This will coerce the units, so we don't need to worry that we copied
         # it from a cm**2 array.
         np.subtract(
-            data[ftype, f"{field_prefix}{ax}"].in_base(unit_system.name), center[i], r,
+            data[ftype, f"{field_prefix}{ax}"].in_base(unit_system.name),
+            center[i],
+            r,
         )
         if data.ds.periodicity[i]:
             np.abs(r, r)
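The three-argument ufunc calls here write into a preallocated buffer r. A schematic sketch of the periodic minimum-image distance along one axis using the same in-place idiom (not yt's actual implementation; the domain width and positions are made up):

import numpy as np

x = np.array([0.1, 0.5, 0.9])  # positions along one axis
center = 0.0                   # center along this axis
width = 1.0                    # periodic domain width
r = np.empty_like(x)

np.subtract(x, center, r)      # r = x - center, written in place
np.abs(r, r)                   # fold negative offsets
np.minimum(r, width - r, r)    # minimum-image convention
print(r)                       # -> [0.1 0.5 0.1]
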
14 changes: 12 additions & 2 deletions yt/fields/field_info_container.py
@@ -168,8 +168,18 @@ def setup_smoothed_fields(self, ptype, num_neighbors=64, ftype="gas"):
                 uni_alias_name = alias_name.replace("particle_position_", "")
             elif "particle_" in alias_name:
                 uni_alias_name = alias_name.replace("particle_", "")
-            new_aliases.append(((ftype, uni_alias_name), (ptype, alias_name),))
-            new_aliases.append(((ptype, uni_alias_name), (ptype, alias_name),))
+            new_aliases.append(
+                (
+                    (ftype, uni_alias_name),
+                    (ptype, alias_name),
+                )
+            )
+            new_aliases.append(
+                (
+                    (ptype, uni_alias_name),
+                    (ptype, alias_name),
+                )
+            )
         for alias, source in new_aliases:
             self.alias(alias, source)

3 changes: 2 additions & 1 deletion yt/fields/local_fields.py
@@ -23,7 +23,8 @@ def add_field(self, name, function, sampling_type, **kwargs):
         # Handle the case where the field has already been added.
         if not override and name in self:
             mylog.warning(
-                "Field %s already exists. To override use `force_override=True`.", name,
+                "Field %s already exists. To override use `force_override=True`.",
+                name,
             )

         return super(LocalFieldInfoContainer, self).add_field(
5 changes: 4 additions & 1 deletion yt/fields/tests/test_sph_fields.py
@@ -20,7 +20,10 @@

 load_kwargs = defaultdict(dict)
 load_kwargs.update(
-    {isothermal_h5: iso_kwargs, isothermal_bin: iso_kwargs,}
+    {
+        isothermal_h5: iso_kwargs,
+        isothermal_bin: iso_kwargs,
+    }
 )

 gas_fields_to_particle_fields = {
8 changes: 7 additions & 1 deletion yt/frontends/adaptahop/data_structures.py
@@ -39,7 +39,13 @@ def _setup_filenames(self):
             ]
         else:
             self.data_files = [
-                cls(self.dataset, self.io, self.dataset.parameter_filename, 0, None,)
+                cls(
+                    self.dataset,
+                    self.io,
+                    self.dataset.parameter_filename,
+                    0,
+                    None,
+                )
             ]


8 changes: 4 additions & 4 deletions yt/frontends/art/data_structures.py
@@ -882,10 +882,10 @@ def _count_art_octs(self, f, offset, MinLev, MaxLevelNow):

     def _read_amr_level(self, oct_handler):
         """Open the oct file, read in octs level-by-level.
         For each oct, only the position, index, level and domain
         are needed - its position in the octree is found automatically.
         The most important part is finding all the information to feed
         oct_handler.add
         """
         self.level_offsets
         f = open(self.ds._file_amr, "rb")
9 changes: 8 additions & 1 deletion yt/frontends/art/io.py
@@ -280,7 +280,14 @@ def _yield_coordinates(self, data_file):

 def _determine_field_size(pf, field, lspecies, ptmax):
     pbool = np.zeros(len(lspecies), dtype="bool")
-    idxas = np.concatenate(([0,], lspecies[:-1]))
+    idxas = np.concatenate(
+        (
+            [
+                0,
+            ],
+            lspecies[:-1],
+        )
+    )
     idxbs = lspecies
     if "specie" in field:
         index = int(field.replace("specie", ""))
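For context, lspecies holds the cumulative particle counts per species, so pairing it with a zero-shifted copy yields each species' [start, end) index range. A small illustration with invented counts:

import numpy as np

lspecies = np.array([100, 250, 400])          # cumulative counts per species
idxas = np.concatenate(([0], lspecies[:-1]))  # starts: [  0 100 250]
idxbs = lspecies                              # ends:   [100 250 400]
for a, b in zip(idxas, idxbs):
    print(f"species slice [{a}:{b}] holds {b - a} particles")
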
3 changes: 2 additions & 1 deletion yt/frontends/athena_pp/data_structures.py
@@ -318,7 +318,8 @@ def _parse_parameter_file(self):
         self._field_map = {}
         k = 0
         for dname, num_var in zip(
-            self._handle.attrs["DatasetNames"], self._handle.attrs["NumVariables"],
+            self._handle.attrs["DatasetNames"],
+            self._handle.attrs["NumVariables"],
         ):
             for j in range(num_var):
                 fname = self._handle.attrs["VariableNames"][k].decode("ascii", "ignore")
8 changes: 2 additions & 6 deletions yt/frontends/boxlib/data_structures.py
@@ -552,9 +552,7 @@ def _initialize_grid_arrays(self):
         self.grid_start_index = np.zeros((self.num_grids, 3), "int64")

     def _initialize_state_variables(self):
-        """override to not re-initialize num_grids in AMRHierarchy.__init__
-        """
+        """override to not re-initialize num_grids in AMRHierarchy.__init__"""
         self._parallel_locking = False
         self._data_file = None
         self._data_mode = None

@@ -995,9 +993,7 @@ def _read_particles(self):
         self._read_particle_file(self.particle_filename)

     def _read_particle_file(self, fn):
-        """actually reads the orion particle data file itself.
-        """
+        """actually reads the orion particle data file itself."""
         if not os.path.exists(fn):
             return
         with open(fn, "r") as f:
3 changes: 1 addition & 2 deletions yt/frontends/eagle/fields.py
@@ -133,8 +133,7 @@ def __init__(self, ds, field_list, slice_info=None):
         )

     def _create_ion_density_func(self, ftype, ion):
-        """ returns a function that calculates the ion density of a particle.
-        """
+        """returns a function that calculates the ion density of a particle."""

         def _ion_density(field, data):

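The function being reformatted is a closure factory: _create_ion_density_func closes over (ftype, ion) and returns the actual _ion_density callback. A generic sketch of that pattern (the names and the scaling stand in for the real ion-density math):

def make_scaled_field(scale):
    def _field(field, data):
        # placeholder for the real per-particle computation
        return scale * data
    return _field

double = make_scaled_field(2.0)
print(double(None, 21.0))  # -> 42.0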